GNU Linux-libre 5.10.153-gnu1
fs/ceph/mds_client.c
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/ceph/ceph_debug.h>
3
4 #include <linux/fs.h>
5 #include <linux/wait.h>
6 #include <linux/slab.h>
7 #include <linux/gfp.h>
8 #include <linux/sched.h>
9 #include <linux/debugfs.h>
10 #include <linux/seq_file.h>
11 #include <linux/ratelimit.h>
12 #include <linux/bits.h>
13 #include <linux/ktime.h>
14
15 #include "super.h"
16 #include "mds_client.h"
17
18 #include <linux/ceph/ceph_features.h>
19 #include <linux/ceph/messenger.h>
20 #include <linux/ceph/decode.h>
21 #include <linux/ceph/pagelist.h>
22 #include <linux/ceph/auth.h>
23 #include <linux/ceph/debugfs.h>
24
25 #define RECONNECT_MAX_SIZE (INT_MAX - PAGE_SIZE)
26
27 /*
28  * A cluster of MDS (metadata server) daemons is responsible for
29  * managing the file system namespace (the directory hierarchy and
30  * inodes) and for coordinating shared access to storage.  Metadata is
31  * partitioned hierarchically across a number of servers, and that
32  * partition varies over time as the cluster adjusts the distribution
33  * in order to balance load.
34  *
35  * The MDS client is primarily responsible for managing synchronous
36  * metadata requests for operations like open, unlink, and so forth.
37  * If there is an MDS failure, we find out about it when we (possibly
38  * request and) receive a new MDS map, and can resubmit affected
39  * requests.
40  *
41  * For the most part, though, we take advantage of a lossless
42  * communications channel to the MDS, and do not need to worry about
43  * timing out or resubmitting requests.
44  *
45  * We maintain a stateful "session" with each MDS we interact with.
46  * Within each session, we send periodic heartbeat messages to ensure
47  * any capabilities or leases we have been issued remain valid.  If
48  * the session times out and goes stale, our leases and capabilities
49  * are no longer valid.
50  */
51
52 struct ceph_reconnect_state {
53         struct ceph_mds_session *session;
54         int nr_caps, nr_realms;
55         struct ceph_pagelist *pagelist;
56         unsigned msg_version;
57         bool allow_multi;
58 };
59
60 static void __wake_requests(struct ceph_mds_client *mdsc,
61                             struct list_head *head);
62 static void ceph_cap_release_work(struct work_struct *work);
63 static void ceph_cap_reclaim_work(struct work_struct *work);
64
65 static const struct ceph_connection_operations mds_con_ops;
66
67
68 /*
69  * mds reply parsing
70  */
71
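/*
 * Note on the parsers below: when the caller passes features == (u64)-1,
 * the reply uses the newer self-describing encoding, in which each
 * sub-structure is wrapped in an envelope of (struct_v, struct_compat,
 * struct_len).  The parsers skip any trailing fields they do not
 * understand by advancing *p to the end of that envelope.
 */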
72 static int parse_reply_info_quota(void **p, void *end,
73                                   struct ceph_mds_reply_info_in *info)
74 {
75         u8 struct_v, struct_compat;
76         u32 struct_len;
77
78         ceph_decode_8_safe(p, end, struct_v, bad);
79         ceph_decode_8_safe(p, end, struct_compat, bad);
80         /* struct_v is expected to be >= 1. we only
81          * understand encoding with struct_compat == 1. */
82         if (!struct_v || struct_compat != 1)
83                 goto bad;
84         ceph_decode_32_safe(p, end, struct_len, bad);
85         ceph_decode_need(p, end, struct_len, bad);
86         end = *p + struct_len;
87         ceph_decode_64_safe(p, end, info->max_bytes, bad);
88         ceph_decode_64_safe(p, end, info->max_files, bad);
89         *p = end;
90         return 0;
91 bad:
92         return -EIO;
93 }
94
95 /*
96  * parse individual inode info
97  */
98 static int parse_reply_info_in(void **p, void *end,
99                                struct ceph_mds_reply_info_in *info,
100                                u64 features)
101 {
102         int err = 0;
103         u8 struct_v = 0;
104
105         if (features == (u64)-1) {
106                 u32 struct_len;
107                 u8 struct_compat;
108                 ceph_decode_8_safe(p, end, struct_v, bad);
109                 ceph_decode_8_safe(p, end, struct_compat, bad);
110                 /* struct_v is expected to be >= 1. we only understand
111                  * encoding with struct_compat == 1. */
112                 if (!struct_v || struct_compat != 1)
113                         goto bad;
114                 ceph_decode_32_safe(p, end, struct_len, bad);
115                 ceph_decode_need(p, end, struct_len, bad);
116                 end = *p + struct_len;
117         }
118
119         ceph_decode_need(p, end, sizeof(struct ceph_mds_reply_inode), bad);
120         info->in = *p;
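        /* the fixed-size inode record is followed by its fragtree split array */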
121         *p += sizeof(struct ceph_mds_reply_inode) +
122                 sizeof(*info->in->fragtree.splits) *
123                 le32_to_cpu(info->in->fragtree.nsplits);
124
125         ceph_decode_32_safe(p, end, info->symlink_len, bad);
126         ceph_decode_need(p, end, info->symlink_len, bad);
127         info->symlink = *p;
128         *p += info->symlink_len;
129
130         ceph_decode_copy_safe(p, end, &info->dir_layout,
131                               sizeof(info->dir_layout), bad);
132         ceph_decode_32_safe(p, end, info->xattr_len, bad);
133         ceph_decode_need(p, end, info->xattr_len, bad);
134         info->xattr_data = *p;
135         *p += info->xattr_len;
136
137         if (features == (u64)-1) {
138                 /* inline data */
139                 ceph_decode_64_safe(p, end, info->inline_version, bad);
140                 ceph_decode_32_safe(p, end, info->inline_len, bad);
141                 ceph_decode_need(p, end, info->inline_len, bad);
142                 info->inline_data = *p;
143                 *p += info->inline_len;
144                 /* quota */
145                 err = parse_reply_info_quota(p, end, info);
146                 if (err < 0)
147                         goto out_bad;
148                 /* pool namespace */
149                 ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
150                 if (info->pool_ns_len > 0) {
151                         ceph_decode_need(p, end, info->pool_ns_len, bad);
152                         info->pool_ns_data = *p;
153                         *p += info->pool_ns_len;
154                 }
155
156                 /* btime */
157                 ceph_decode_need(p, end, sizeof(info->btime), bad);
158                 ceph_decode_copy(p, &info->btime, sizeof(info->btime));
159
160                 /* change attribute */
161                 ceph_decode_64_safe(p, end, info->change_attr, bad);
162
163                 /* dir pin */
164                 if (struct_v >= 2) {
165                         ceph_decode_32_safe(p, end, info->dir_pin, bad);
166                 } else {
167                         info->dir_pin = -ENODATA;
168                 }
169
170                 /* snapshot birth time, remains zero for v<=2 */
171                 if (struct_v >= 3) {
172                         ceph_decode_need(p, end, sizeof(info->snap_btime), bad);
173                         ceph_decode_copy(p, &info->snap_btime,
174                                          sizeof(info->snap_btime));
175                 } else {
176                         memset(&info->snap_btime, 0, sizeof(info->snap_btime));
177                 }
178
179                 *p = end;
180         } else {
181                 if (features & CEPH_FEATURE_MDS_INLINE_DATA) {
182                         ceph_decode_64_safe(p, end, info->inline_version, bad);
183                         ceph_decode_32_safe(p, end, info->inline_len, bad);
184                         ceph_decode_need(p, end, info->inline_len, bad);
185                         info->inline_data = *p;
186                         *p += info->inline_len;
187                 } else
188                         info->inline_version = CEPH_INLINE_NONE;
189
190                 if (features & CEPH_FEATURE_MDS_QUOTA) {
191                         err = parse_reply_info_quota(p, end, info);
192                         if (err < 0)
193                                 goto out_bad;
194                 } else {
195                         info->max_bytes = 0;
196                         info->max_files = 0;
197                 }
198
199                 info->pool_ns_len = 0;
200                 info->pool_ns_data = NULL;
201                 if (features & CEPH_FEATURE_FS_FILE_LAYOUT_V2) {
202                         ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
203                         if (info->pool_ns_len > 0) {
204                                 ceph_decode_need(p, end, info->pool_ns_len, bad);
205                                 info->pool_ns_data = *p;
206                                 *p += info->pool_ns_len;
207                         }
208                 }
209
210                 if (features & CEPH_FEATURE_FS_BTIME) {
211                         ceph_decode_need(p, end, sizeof(info->btime), bad);
212                         ceph_decode_copy(p, &info->btime, sizeof(info->btime));
213                         ceph_decode_64_safe(p, end, info->change_attr, bad);
214                 }
215
216                 info->dir_pin = -ENODATA;
217                 /* info->snap_btime remains zero */
218         }
219         return 0;
220 bad:
221         err = -EIO;
222 out_bad:
223         return err;
224 }
225
226 static int parse_reply_info_dir(void **p, void *end,
227                                 struct ceph_mds_reply_dirfrag **dirfrag,
228                                 u64 features)
229 {
230         if (features == (u64)-1) {
231                 u8 struct_v, struct_compat;
232                 u32 struct_len;
233                 ceph_decode_8_safe(p, end, struct_v, bad);
234                 ceph_decode_8_safe(p, end, struct_compat, bad);
235                 /* struct_v is expected to be >= 1. we only understand
236                  * encoding whose struct_compat == 1. */
237                 if (!struct_v || struct_compat != 1)
238                         goto bad;
239                 ceph_decode_32_safe(p, end, struct_len, bad);
240                 ceph_decode_need(p, end, struct_len, bad);
241                 end = *p + struct_len;
242         }
243
244         ceph_decode_need(p, end, sizeof(**dirfrag), bad);
245         *dirfrag = *p;
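        /* the dirfrag header is followed by 'ndist' __le32 replica entries; step over them */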
246         *p += sizeof(**dirfrag) + sizeof(u32) * le32_to_cpu((*dirfrag)->ndist);
247         if (unlikely(*p > end))
248                 goto bad;
249         if (features == (u64)-1)
250                 *p = end;
251         return 0;
252 bad:
253         return -EIO;
254 }
255
256 static int parse_reply_info_lease(void **p, void *end,
257                                   struct ceph_mds_reply_lease **lease,
258                                   u64 features)
259 {
260         if (features == (u64)-1) {
261                 u8 struct_v, struct_compat;
262                 u32 struct_len;
263                 ceph_decode_8_safe(p, end, struct_v, bad);
264                 ceph_decode_8_safe(p, end, struct_compat, bad);
265                 /* struct_v is expected to be >= 1. we only understand
266                  * encoding whose struct_compat == 1. */
267                 if (!struct_v || struct_compat != 1)
268                         goto bad;
269                 ceph_decode_32_safe(p, end, struct_len, bad);
270                 ceph_decode_need(p, end, struct_len, bad);
271                 end = *p + struct_len;
272         }
273
274         ceph_decode_need(p, end, sizeof(**lease), bad);
275         *lease = *p;
276         *p += sizeof(**lease);
277         if (features == (u64)-1)
278                 *p = end;
279         return 0;
280 bad:
281         return -EIO;
282 }
283
284 /*
285  * parse a normal reply, which may contain a (dir+)dentry and/or a
286  * target inode.
287  */
288 static int parse_reply_info_trace(void **p, void *end,
289                                   struct ceph_mds_reply_info_parsed *info,
290                                   u64 features)
291 {
292         int err;
293
294         if (info->head->is_dentry) {
295                 err = parse_reply_info_in(p, end, &info->diri, features);
296                 if (err < 0)
297                         goto out_bad;
298
299                 err = parse_reply_info_dir(p, end, &info->dirfrag, features);
300                 if (err < 0)
301                         goto out_bad;
302
303                 ceph_decode_32_safe(p, end, info->dname_len, bad);
304                 ceph_decode_need(p, end, info->dname_len, bad);
305                 info->dname = *p;
306                 *p += info->dname_len;
307
308                 err = parse_reply_info_lease(p, end, &info->dlease, features);
309                 if (err < 0)
310                         goto out_bad;
311         }
312
313         if (info->head->is_target) {
314                 err = parse_reply_info_in(p, end, &info->targeti, features);
315                 if (err < 0)
316                         goto out_bad;
317         }
318
319         if (unlikely(*p != end))
320                 goto bad;
321         return 0;
322
323 bad:
324         err = -EIO;
325 out_bad:
326         pr_err("problem parsing mds trace %d\n", err);
327         return err;
328 }
329
330 /*
331  * parse readdir results
332  */
333 static int parse_reply_info_readdir(void **p, void *end,
334                                 struct ceph_mds_reply_info_parsed *info,
335                                 u64 features)
336 {
337         u32 num, i = 0;
338         int err;
339
340         err = parse_reply_info_dir(p, end, &info->dir_dir, features);
341         if (err < 0)
342                 goto out_bad;
343
344         ceph_decode_need(p, end, sizeof(num) + 2, bad);
345         num = ceph_decode_32(p);
346         {
347                 u16 flags = ceph_decode_16(p);
348                 info->dir_end = !!(flags & CEPH_READDIR_FRAG_END);
349                 info->dir_complete = !!(flags & CEPH_READDIR_FRAG_COMPLETE);
350                 info->hash_order = !!(flags & CEPH_READDIR_HASH_ORDER);
351                 info->offset_hash = !!(flags & CEPH_READDIR_OFFSET_HASH);
352         }
353         if (num == 0)
354                 goto done;
355
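        /* verify the preallocated dir_entries buffer can hold 'num' entries */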
356         BUG_ON(!info->dir_entries);
357         if ((unsigned long)(info->dir_entries + num) >
358             (unsigned long)info->dir_entries + info->dir_buf_size) {
359                 pr_err("dir contents are larger than expected\n");
360                 WARN_ON(1);
361                 goto bad;
362         }
363
364         info->dir_nr = num;
365         while (num) {
366                 struct ceph_mds_reply_dir_entry *rde = info->dir_entries + i;
367                 /* dentry */
368                 ceph_decode_32_safe(p, end, rde->name_len, bad);
369                 ceph_decode_need(p, end, rde->name_len, bad);
370                 rde->name = *p;
371                 *p += rde->name_len;
372                 dout("parsed dir dname '%.*s'\n", rde->name_len, rde->name);
373
374                 /* dentry lease */
375                 err = parse_reply_info_lease(p, end, &rde->lease, features);
376                 if (err)
377                         goto out_bad;
378                 /* inode */
379                 err = parse_reply_info_in(p, end, &rde->inode, features);
380                 if (err < 0)
381                         goto out_bad;
382                 /* ceph_readdir_prepopulate() will update it */
383                 rde->offset = 0;
384                 i++;
385                 num--;
386         }
387
388 done:
389         /* Skip over any unrecognized fields */
390         *p = end;
391         return 0;
392
393 bad:
394         err = -EIO;
395 out_bad:
396         pr_err("problem parsing dir contents %d\n", err);
397         return err;
398 }
399
400 /*
401  * parse fcntl F_GETLK results
402  */
403 static int parse_reply_info_filelock(void **p, void *end,
404                                      struct ceph_mds_reply_info_parsed *info,
405                                      u64 features)
406 {
407         if (*p + sizeof(*info->filelock_reply) > end)
408                 goto bad;
409
410         info->filelock_reply = *p;
411
412         /* Skip over any unrecognized fields */
413         *p = end;
414         return 0;
415 bad:
416         return -EIO;
417 }
418
419
420 #if BITS_PER_LONG == 64
421
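/*
 * Inode numbers delegated by the MDS are tracked per-session in the
 * s_delegated_inos xarray; each entry maps a delegated inode number to
 * this sentinel while the number is still available for the client to use.
 */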
422 #define DELEGATED_INO_AVAILABLE         xa_mk_value(1)
423
424 static int ceph_parse_deleg_inos(void **p, void *end,
425                                  struct ceph_mds_session *s)
426 {
427         u32 sets;
428
429         ceph_decode_32_safe(p, end, sets, bad);
430         dout("got %u sets of delegated inodes\n", sets);
431         while (sets--) {
432                 u64 start, len, ino;
433
434                 ceph_decode_64_safe(p, end, start, bad);
435                 ceph_decode_64_safe(p, end, len, bad);
436
437                 /* Don't accept a delegation of system inodes */
438                 if (start < CEPH_INO_SYSTEM_BASE) {
439                         pr_warn_ratelimited("ceph: ignoring reserved inode range delegation (start=0x%llx len=0x%llx)\n",
440                                         start, len);
441                         continue;
442                 }
443                 while (len--) {
444                         int err = xa_insert(&s->s_delegated_inos, ino = start++,
445                                             DELEGATED_INO_AVAILABLE,
446                                             GFP_KERNEL);
447                         if (!err) {
448                                 dout("added delegated inode 0x%llx\n",
449                                      start - 1);
450                         } else if (err == -EBUSY) {
451                                 pr_warn("ceph: MDS delegated inode 0x%llx more than once.\n",
452                                         start - 1);
453                         } else {
454                                 return err;
455                         }
456                 }
457         }
458         return 0;
459 bad:
460         return -EIO;
461 }
462
463 u64 ceph_get_deleg_ino(struct ceph_mds_session *s)
464 {
465         unsigned long ino;
466         void *val;
467
468         xa_for_each(&s->s_delegated_inos, ino, val) {
469                 val = xa_erase(&s->s_delegated_inos, ino);
470                 if (val == DELEGATED_INO_AVAILABLE)
471                         return ino;
472         }
473         return 0;
474 }
475
476 int ceph_restore_deleg_ino(struct ceph_mds_session *s, u64 ino)
477 {
478         return xa_insert(&s->s_delegated_inos, ino, DELEGATED_INO_AVAILABLE,
479                          GFP_KERNEL);
480 }
481 #else /* BITS_PER_LONG == 64 */
482 /*
483  * FIXME: xarrays can't handle 64-bit indexes on a 32-bit arch. For now, just
484  * ignore delegated_inos on 32 bit arch. Maybe eventually add xarrays for top
485  * and bottom words?
486  */
487 static int ceph_parse_deleg_inos(void **p, void *end,
488                                  struct ceph_mds_session *s)
489 {
490         u32 sets;
491
492         ceph_decode_32_safe(p, end, sets, bad);
493         if (sets)
494                 ceph_decode_skip_n(p, end, sets * 2 * sizeof(__le64), bad);
495         return 0;
496 bad:
497         return -EIO;
498 }
499
500 u64 ceph_get_deleg_ino(struct ceph_mds_session *s)
501 {
502         return 0;
503 }
504
505 int ceph_restore_deleg_ino(struct ceph_mds_session *s, u64 ino)
506 {
507         return 0;
508 }
509 #endif /* BITS_PER_LONG == 64 */
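
/*
 * Rough usage sketch for the helpers above (based on how the create path
 * is expected to consume delegations): a request that may create an inode
 * asks the session for a pre-delegated number via ceph_get_deleg_ino(),
 * where 0 means none is available; if the request is torn down before the
 * MDS sees it, ceph_restore_deleg_ino() hands the number back.
 */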
510
511 /*
512  * parse create results
513  */
514 static int parse_reply_info_create(void **p, void *end,
515                                   struct ceph_mds_reply_info_parsed *info,
516                                   u64 features, struct ceph_mds_session *s)
517 {
518         int ret;
519
520         if (features == (u64)-1 ||
521             (features & CEPH_FEATURE_REPLY_CREATE_INODE)) {
522                 if (*p == end) {
523                         /* Malformed reply? */
524                         info->has_create_ino = false;
525                 } else if (test_bit(CEPHFS_FEATURE_DELEG_INO, &s->s_features)) {
526                         u8 struct_v, struct_compat;
527                         u32 len;
528
529                         info->has_create_ino = true;
530                         ceph_decode_8_safe(p, end, struct_v, bad);
531                         ceph_decode_8_safe(p, end, struct_compat, bad);
532                         ceph_decode_32_safe(p, end, len, bad);
533                         ceph_decode_64_safe(p, end, info->ino, bad);
534                         ret = ceph_parse_deleg_inos(p, end, s);
535                         if (ret)
536                                 return ret;
537                 } else {
538                         /* legacy */
539                         ceph_decode_64_safe(p, end, info->ino, bad);
540                         info->has_create_ino = true;
541                 }
542         } else {
543                 if (*p != end)
544                         goto bad;
545         }
546
547         /* Skip over any unrecognized fields */
548         *p = end;
549         return 0;
550 bad:
551         return -EIO;
552 }
553
554 /*
555  * parse extra results
556  */
557 static int parse_reply_info_extra(void **p, void *end,
558                                   struct ceph_mds_reply_info_parsed *info,
559                                   u64 features, struct ceph_mds_session *s)
560 {
561         u32 op = le32_to_cpu(info->head->op);
562
563         if (op == CEPH_MDS_OP_GETFILELOCK)
564                 return parse_reply_info_filelock(p, end, info, features);
565         else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP)
566                 return parse_reply_info_readdir(p, end, info, features);
567         else if (op == CEPH_MDS_OP_CREATE)
568                 return parse_reply_info_create(p, end, info, features, s);
569         else
570                 return -EIO;
571 }
572
573 /*
574  * parse entire mds reply
575  */
576 static int parse_reply_info(struct ceph_mds_session *s, struct ceph_msg *msg,
577                             struct ceph_mds_reply_info_parsed *info,
578                             u64 features)
579 {
580         void *p, *end;
581         u32 len;
582         int err;
583
584         info->head = msg->front.iov_base;
585         p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head);
586         end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head);
587
588         /* trace */
589         ceph_decode_32_safe(&p, end, len, bad);
590         if (len > 0) {
591                 ceph_decode_need(&p, end, len, bad);
592                 err = parse_reply_info_trace(&p, p+len, info, features);
593                 if (err < 0)
594                         goto out_bad;
595         }
596
597         /* extra */
598         ceph_decode_32_safe(&p, end, len, bad);
599         if (len > 0) {
600                 ceph_decode_need(&p, end, len, bad);
601                 err = parse_reply_info_extra(&p, p+len, info, features, s);
602                 if (err < 0)
603                         goto out_bad;
604         }
605
606         /* snap blob */
607         ceph_decode_32_safe(&p, end, len, bad);
608         info->snapblob_len = len;
609         info->snapblob = p;
610         p += len;
611
612         if (p != end)
613                 goto bad;
614         return 0;
615
616 bad:
617         err = -EIO;
618 out_bad:
619         pr_err("mds parse_reply err %d\n", err);
620         return err;
621 }
622
623 static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
624 {
625         if (!info->dir_entries)
626                 return;
627         free_pages((unsigned long)info->dir_entries, get_order(info->dir_buf_size));
628 }
629
630
631 /*
632  * sessions
633  */
634 const char *ceph_session_state_name(int s)
635 {
636         switch (s) {
637         case CEPH_MDS_SESSION_NEW: return "new";
638         case CEPH_MDS_SESSION_OPENING: return "opening";
639         case CEPH_MDS_SESSION_OPEN: return "open";
640         case CEPH_MDS_SESSION_HUNG: return "hung";
641         case CEPH_MDS_SESSION_CLOSING: return "closing";
642         case CEPH_MDS_SESSION_CLOSED: return "closed";
643         case CEPH_MDS_SESSION_RESTARTING: return "restarting";
644         case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
645         case CEPH_MDS_SESSION_REJECTED: return "rejected";
646         default: return "???";
647         }
648 }
649
650 struct ceph_mds_session *ceph_get_mds_session(struct ceph_mds_session *s)
651 {
652         if (refcount_inc_not_zero(&s->s_ref)) {
653                 dout("mdsc get_session %p %d -> %d\n", s,
654                      refcount_read(&s->s_ref)-1, refcount_read(&s->s_ref));
655                 return s;
656         } else {
657                 dout("mdsc get_session %p 0 -- FAIL\n", s);
658                 return NULL;
659         }
660 }
661
662 void ceph_put_mds_session(struct ceph_mds_session *s)
663 {
664         if (IS_ERR_OR_NULL(s))
665                 return;
666
667         dout("mdsc put_session %p %d -> %d\n", s,
668              refcount_read(&s->s_ref), refcount_read(&s->s_ref)-1);
669         if (refcount_dec_and_test(&s->s_ref)) {
670                 if (s->s_auth.authorizer)
671                         ceph_auth_destroy_authorizer(s->s_auth.authorizer);
672                 WARN_ON(mutex_is_locked(&s->s_mutex));
673                 xa_destroy(&s->s_delegated_inos);
674                 kfree(s);
675         }
676 }
677
678 /*
679  * called under mdsc->mutex
680  */
681 struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
682                                                    int mds)
683 {
684         if (mds >= mdsc->max_sessions || !mdsc->sessions[mds])
685                 return NULL;
686         return ceph_get_mds_session(mdsc->sessions[mds]);
687 }
688
689 static bool __have_session(struct ceph_mds_client *mdsc, int mds)
690 {
691         if (mds >= mdsc->max_sessions || !mdsc->sessions[mds])
692                 return false;
693         else
694                 return true;
695 }
696
697 static int __verify_registered_session(struct ceph_mds_client *mdsc,
698                                        struct ceph_mds_session *s)
699 {
700         if (s->s_mds >= mdsc->max_sessions ||
701             mdsc->sessions[s->s_mds] != s)
702                 return -ENOENT;
703         return 0;
704 }
705
706 /*
707  * create+register a new session for given mds.
708  * called under mdsc->mutex.
709  */
710 static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
711                                                  int mds)
712 {
713         struct ceph_mds_session *s;
714
715         if (mds >= mdsc->mdsmap->possible_max_rank)
716                 return ERR_PTR(-EINVAL);
717
718         s = kzalloc(sizeof(*s), GFP_NOFS);
719         if (!s)
720                 return ERR_PTR(-ENOMEM);
721
722         if (mds >= mdsc->max_sessions) {
723                 int newmax = 1 << get_count_order(mds + 1);
724                 struct ceph_mds_session **sa;
725
726                 dout("%s: realloc to %d\n", __func__, newmax);
727                 sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
728                 if (!sa)
729                         goto fail_realloc;
730                 if (mdsc->sessions) {
731                         memcpy(sa, mdsc->sessions,
732                                mdsc->max_sessions * sizeof(void *));
733                         kfree(mdsc->sessions);
734                 }
735                 mdsc->sessions = sa;
736                 mdsc->max_sessions = newmax;
737         }
738
739         dout("%s: mds%d\n", __func__, mds);
740         s->s_mdsc = mdsc;
741         s->s_mds = mds;
742         s->s_state = CEPH_MDS_SESSION_NEW;
743         s->s_ttl = 0;
744         s->s_seq = 0;
745         mutex_init(&s->s_mutex);
746
747         ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr);
748
749         spin_lock_init(&s->s_gen_ttl_lock);
750         s->s_cap_gen = 1;
751         s->s_cap_ttl = jiffies - 1;
752
753         spin_lock_init(&s->s_cap_lock);
754         s->s_renew_requested = 0;
755         s->s_renew_seq = 0;
756         INIT_LIST_HEAD(&s->s_caps);
757         s->s_nr_caps = 0;
758         refcount_set(&s->s_ref, 1);
759         INIT_LIST_HEAD(&s->s_waiting);
760         INIT_LIST_HEAD(&s->s_unsafe);
761         xa_init(&s->s_delegated_inos);
762         s->s_num_cap_releases = 0;
763         s->s_cap_reconnect = 0;
764         s->s_cap_iterator = NULL;
765         INIT_LIST_HEAD(&s->s_cap_releases);
766         INIT_WORK(&s->s_cap_release_work, ceph_cap_release_work);
767
768         INIT_LIST_HEAD(&s->s_cap_dirty);
769         INIT_LIST_HEAD(&s->s_cap_flushing);
770
771         mdsc->sessions[mds] = s;
772         atomic_inc(&mdsc->num_sessions);
773         refcount_inc(&s->s_ref);  /* one ref to sessions[], one to caller */
774
775         ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds,
776                       ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
777
778         return s;
779
780 fail_realloc:
781         kfree(s);
782         return ERR_PTR(-ENOMEM);
783 }
784
785 /*
786  * called under mdsc->mutex
787  */
788 static void __unregister_session(struct ceph_mds_client *mdsc,
789                                struct ceph_mds_session *s)
790 {
791         dout("__unregister_session mds%d %p\n", s->s_mds, s);
792         BUG_ON(mdsc->sessions[s->s_mds] != s);
793         mdsc->sessions[s->s_mds] = NULL;
794         ceph_con_close(&s->s_con);
795         ceph_put_mds_session(s);
796         atomic_dec(&mdsc->num_sessions);
797 }
798
799 /*
800  * drop session refs in request.
801  *
802  * should be last request ref, or hold mdsc->mutex
803  */
804 static void put_request_session(struct ceph_mds_request *req)
805 {
806         if (req->r_session) {
807                 ceph_put_mds_session(req->r_session);
808                 req->r_session = NULL;
809         }
810 }
811
812 void ceph_mdsc_release_request(struct kref *kref)
813 {
814         struct ceph_mds_request *req = container_of(kref,
815                                                     struct ceph_mds_request,
816                                                     r_kref);
817         ceph_mdsc_release_dir_caps_no_check(req);
818         destroy_reply_info(&req->r_reply_info);
819         if (req->r_request)
820                 ceph_msg_put(req->r_request);
821         if (req->r_reply)
822                 ceph_msg_put(req->r_reply);
823         if (req->r_inode) {
824                 ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
825                 /* avoid calling iput_final() in mds dispatch threads */
826                 ceph_async_iput(req->r_inode);
827         }
828         if (req->r_parent) {
829                 ceph_put_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
830                 ceph_async_iput(req->r_parent);
831         }
832         ceph_async_iput(req->r_target_inode);
833         if (req->r_dentry)
834                 dput(req->r_dentry);
835         if (req->r_old_dentry)
836                 dput(req->r_old_dentry);
837         if (req->r_old_dentry_dir) {
838                 /*
839                  * track (and drop pins for) r_old_dentry_dir
840                  * separately, since r_old_dentry's d_parent may have
841                  * changed between the dir mutex being dropped and
842                  * this request being freed.
843                  */
844                 ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir),
845                                   CEPH_CAP_PIN);
846                 ceph_async_iput(req->r_old_dentry_dir);
847         }
848         kfree(req->r_path1);
849         kfree(req->r_path2);
850         if (req->r_pagelist)
851                 ceph_pagelist_release(req->r_pagelist);
852         put_request_session(req);
853         ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
854         WARN_ON_ONCE(!list_empty(&req->r_wait));
855         kmem_cache_free(ceph_mds_request_cachep, req);
856 }
857
858 DEFINE_RB_FUNCS(request, struct ceph_mds_request, r_tid, r_node)
859
860 /*
861  * lookup request, bump ref if found.
862  *
863  * called under mdsc->mutex.
864  */
865 static struct ceph_mds_request *
866 lookup_get_request(struct ceph_mds_client *mdsc, u64 tid)
867 {
868         struct ceph_mds_request *req;
869
870         req = lookup_request(&mdsc->request_tree, tid);
871         if (req)
872                 ceph_mdsc_get_request(req);
873
874         return req;
875 }
876
877 /*
878  * Register an in-flight request, and assign a tid.  Link to the directory
879  * we are modifying (if any).
880  *
881  * Called under mdsc->mutex.
882  */
883 static void __register_request(struct ceph_mds_client *mdsc,
884                                struct ceph_mds_request *req,
885                                struct inode *dir)
886 {
887         int ret = 0;
888
889         req->r_tid = ++mdsc->last_tid;
890         if (req->r_num_caps) {
891                 ret = ceph_reserve_caps(mdsc, &req->r_caps_reservation,
892                                         req->r_num_caps);
893                 if (ret < 0) {
894                         pr_err("__register_request %p "
895                                "failed to reserve caps: %d\n", req, ret);
896                         /* set req->r_err to fail early from __do_request */
897                         req->r_err = ret;
898                         return;
899                 }
900         }
901         dout("__register_request %p tid %lld\n", req, req->r_tid);
902         ceph_mdsc_get_request(req);
903         insert_request(&mdsc->request_tree, req);
904
905         req->r_uid = current_fsuid();
906         req->r_gid = current_fsgid();
907
908         if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK)
909                 mdsc->oldest_tid = req->r_tid;
910
911         if (dir) {
912                 struct ceph_inode_info *ci = ceph_inode(dir);
913
914                 ihold(dir);
915                 req->r_unsafe_dir = dir;
916                 spin_lock(&ci->i_unsafe_lock);
917                 list_add_tail(&req->r_unsafe_dir_item, &ci->i_unsafe_dirops);
918                 spin_unlock(&ci->i_unsafe_lock);
919         }
920 }
921
922 static void __unregister_request(struct ceph_mds_client *mdsc,
923                                  struct ceph_mds_request *req)
924 {
925         dout("__unregister_request %p tid %lld\n", req, req->r_tid);
926
927         /* Never leave an unregistered request on an unsafe list! */
928         list_del_init(&req->r_unsafe_item);
929
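        /* if this request held the oldest tid, advance oldest_tid to the
         * next tracked request (SETFILELOCK requests are skipped, matching
         * __register_request) */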
930         if (req->r_tid == mdsc->oldest_tid) {
931                 struct rb_node *p = rb_next(&req->r_node);
932                 mdsc->oldest_tid = 0;
933                 while (p) {
934                         struct ceph_mds_request *next_req =
935                                 rb_entry(p, struct ceph_mds_request, r_node);
936                         if (next_req->r_op != CEPH_MDS_OP_SETFILELOCK) {
937                                 mdsc->oldest_tid = next_req->r_tid;
938                                 break;
939                         }
940                         p = rb_next(p);
941                 }
942         }
943
944         erase_request(&mdsc->request_tree, req);
945
946         if (req->r_unsafe_dir) {
947                 struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);
948                 spin_lock(&ci->i_unsafe_lock);
949                 list_del_init(&req->r_unsafe_dir_item);
950                 spin_unlock(&ci->i_unsafe_lock);
951         }
952         if (req->r_target_inode &&
953             test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
954                 struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
955                 spin_lock(&ci->i_unsafe_lock);
956                 list_del_init(&req->r_unsafe_target_item);
957                 spin_unlock(&ci->i_unsafe_lock);
958         }
959
960         if (req->r_unsafe_dir) {
961                 /* avoid calling iput_final() in mds dispatch threads */
962                 ceph_async_iput(req->r_unsafe_dir);
963                 req->r_unsafe_dir = NULL;
964         }
965
966         complete_all(&req->r_safe_completion);
967
968         ceph_mdsc_put_request(req);
969 }
970
971 /*
972  * Walk back up the dentry tree until we hit a dentry representing a
973  * non-snapshot inode. We do this using the rcu_read_lock (which must be held
974  * when calling this) to ensure that the objects won't disappear while we're
975  * working with them. Once we hit a candidate dentry, we attempt to take a
976  * reference to it, and return that as the result.
977  */
978 static struct inode *get_nonsnap_parent(struct dentry *dentry)
979 {
980         struct inode *inode = NULL;
981
982         while (dentry && !IS_ROOT(dentry)) {
983                 inode = d_inode_rcu(dentry);
984                 if (!inode || ceph_snap(inode) == CEPH_NOSNAP)
985                         break;
986                 dentry = dentry->d_parent;
987         }
988         if (inode)
989                 inode = igrab(inode);
990         return inode;
991 }
992
993 /*
994  * Choose mds to send request to next.  If there is a hint set in the
995  * request (e.g., due to a prior forward hint from the mds), use that.
996  * Otherwise, consult frag tree and/or caps to identify the
997  * appropriate mds.  If all else fails, choose randomly.
998  *
999  * Called under mdsc->mutex.
1000  */
1001 static int __choose_mds(struct ceph_mds_client *mdsc,
1002                         struct ceph_mds_request *req,
1003                         bool *random)
1004 {
1005         struct inode *inode;
1006         struct ceph_inode_info *ci;
1007         struct ceph_cap *cap;
1008         int mode = req->r_direct_mode;
1009         int mds = -1;
1010         u32 hash = req->r_direct_hash;
1011         bool is_hash = test_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags);
1012
1013         if (random)
1014                 *random = false;
1015
1016         /*
1017          * is there a specific mds we should try?  ignore hint if we have
1018          * no session and the mds is not up (active or recovering).
1019          */
1020         if (req->r_resend_mds >= 0 &&
1021             (__have_session(mdsc, req->r_resend_mds) ||
1022              ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
1023                 dout("%s using resend_mds mds%d\n", __func__,
1024                      req->r_resend_mds);
1025                 return req->r_resend_mds;
1026         }
1027
1028         if (mode == USE_RANDOM_MDS)
1029                 goto random;
1030
1031         inode = NULL;
1032         if (req->r_inode) {
1033                 if (ceph_snap(req->r_inode) != CEPH_SNAPDIR) {
1034                         inode = req->r_inode;
1035                         ihold(inode);
1036                 } else {
1037                         /* req->r_dentry is non-null for LSSNAP request */
1038                         rcu_read_lock();
1039                         inode = get_nonsnap_parent(req->r_dentry);
1040                         rcu_read_unlock();
1041                         dout("%s using snapdir's parent %p\n", __func__, inode);
1042                 }
1043         } else if (req->r_dentry) {
1044                 /* ignore race with rename; old or new d_parent is okay */
1045                 struct dentry *parent;
1046                 struct inode *dir;
1047
1048                 rcu_read_lock();
1049                 parent = READ_ONCE(req->r_dentry->d_parent);
1050                 dir = req->r_parent ? : d_inode_rcu(parent);
1051
1052                 if (!dir || dir->i_sb != mdsc->fsc->sb) {
1053                         /*  not this fs or parent went negative */
1054                         inode = d_inode(req->r_dentry);
1055                         if (inode)
1056                                 ihold(inode);
1057                 } else if (ceph_snap(dir) != CEPH_NOSNAP) {
1058                         /* direct snapped/virtual snapdir requests
1059                          * based on parent dir inode */
1060                         inode = get_nonsnap_parent(parent);
1061                         dout("%s using nonsnap parent %p\n", __func__, inode);
1062                 } else {
1063                         /* dentry target */
1064                         inode = d_inode(req->r_dentry);
1065                         if (!inode || mode == USE_AUTH_MDS) {
1066                                 /* dir + name */
1067                                 inode = igrab(dir);
1068                                 hash = ceph_dentry_hash(dir, req->r_dentry);
1069                                 is_hash = true;
1070                         } else {
1071                                 ihold(inode);
1072                         }
1073                 }
1074                 rcu_read_unlock();
1075         }
1076
1077         dout("%s %p is_hash=%d (0x%x) mode %d\n", __func__, inode, (int)is_hash,
1078              hash, mode);
1079         if (!inode)
1080                 goto random;
1081         ci = ceph_inode(inode);
1082
1083         if (is_hash && S_ISDIR(inode->i_mode)) {
1084                 struct ceph_inode_frag frag;
1085                 int found;
1086
1087                 ceph_choose_frag(ci, hash, &frag, &found);
1088                 if (found) {
1089                         if (mode == USE_ANY_MDS && frag.ndist > 0) {
1090                                 u8 r;
1091
1092                                 /* choose a random replica */
1093                                 get_random_bytes(&r, 1);
1094                                 r %= frag.ndist;
1095                                 mds = frag.dist[r];
1096                                 dout("%s %p %llx.%llx frag %u mds%d (%d/%d)\n",
1097                                      __func__, inode, ceph_vinop(inode),
1098                                      frag.frag, mds, (int)r, frag.ndist);
1099                                 if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
1100                                     CEPH_MDS_STATE_ACTIVE &&
1101                                     !ceph_mdsmap_is_laggy(mdsc->mdsmap, mds))
1102                                         goto out;
1103                         }
1104
1105                         /* since this file/dir wasn't known to be
1106                          * replicated, we want to look for the
1107                          * authoritative mds. */
1108                         if (frag.mds >= 0) {
1109                                 /* choose auth mds */
1110                                 mds = frag.mds;
1111                                 dout("%s %p %llx.%llx frag %u mds%d (auth)\n",
1112                                      __func__, inode, ceph_vinop(inode),
1113                                      frag.frag, mds);
1114                                 if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
1115                                     CEPH_MDS_STATE_ACTIVE) {
1116                                         if (!ceph_mdsmap_is_laggy(mdsc->mdsmap,
1117                                                                   mds))
1118                                                 goto out;
1119                                 }
1120                         }
1121                         mode = USE_AUTH_MDS;
1122                 }
1123         }
1124
1125         spin_lock(&ci->i_ceph_lock);
1126         cap = NULL;
1127         if (mode == USE_AUTH_MDS)
1128                 cap = ci->i_auth_cap;
1129         if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
1130                 cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
1131         if (!cap) {
1132                 spin_unlock(&ci->i_ceph_lock);
1133                 ceph_async_iput(inode);
1134                 goto random;
1135         }
1136         mds = cap->session->s_mds;
1137         dout("%s %p %llx.%llx mds%d (%scap %p)\n", __func__,
1138              inode, ceph_vinop(inode), mds,
1139              cap == ci->i_auth_cap ? "auth " : "", cap);
1140         spin_unlock(&ci->i_ceph_lock);
1141 out:
1142         /* avoid calling iput_final() while holding mdsc->mutex or
1143          * in mds dispatch threads */
1144         ceph_async_iput(inode);
1145         return mds;
1146
1147 random:
1148         if (random)
1149                 *random = true;
1150
1151         mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
1152         dout("%s chose random mds%d\n", __func__, mds);
1153         return mds;
1154 }
1155
1156
1157 /*
1158  * session messages
1159  */
1160 static struct ceph_msg *create_session_msg(u32 op, u64 seq)
1161 {
1162         struct ceph_msg *msg;
1163         struct ceph_mds_session_head *h;
1164
1165         msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS,
1166                            false);
1167         if (!msg) {
1168                 pr_err("create_session_msg ENOMEM creating msg\n");
1169                 return NULL;
1170         }
1171         h = msg->front.iov_base;
1172         h->op = cpu_to_le32(op);
1173         h->seq = cpu_to_le64(seq);
1174
1175         return msg;
1176 }
1177
1178 static const unsigned char feature_bits[] = CEPHFS_FEATURES_CLIENT_SUPPORTED;
1179 #define FEATURE_BYTES(c) (DIV_ROUND_UP((size_t)feature_bits[c - 1] + 1, 64) * 8)
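/*
 * Encode the feature bits this client supports as a length-prefixed bitmap.
 * FEATURE_BYTES() sizes the bitmap from the highest supported bit, rounded
 * up to a whole number of 64-bit words.
 */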
1180 static int encode_supported_features(void **p, void *end)
1181 {
1182         static const size_t count = ARRAY_SIZE(feature_bits);
1183
1184         if (count > 0) {
1185                 size_t i;
1186                 size_t size = FEATURE_BYTES(count);
1187                 unsigned long bit;
1188
1189                 if (WARN_ON_ONCE(*p + 4 + size > end))
1190                         return -ERANGE;
1191
1192                 ceph_encode_32(p, size);
1193                 memset(*p, 0, size);
1194                 for (i = 0; i < count; i++) {
1195                         bit = feature_bits[i];
1196                         ((unsigned char *)(*p))[bit / 8] |= BIT(bit % 8);
1197                 }
1198                 *p += size;
1199         } else {
1200                 if (WARN_ON_ONCE(*p + 4 > end))
1201                         return -ERANGE;
1202
1203                 ceph_encode_32(p, 0);
1204         }
1205
1206         return 0;
1207 }
1208
1209 static const unsigned char metric_bits[] = CEPHFS_METRIC_SPEC_CLIENT_SUPPORTED;
1210 #define METRIC_BYTES(cnt) (DIV_ROUND_UP((size_t)metric_bits[cnt - 1] + 1, 64) * 8)
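/*
 * Encode the metric spec: a one-byte version and compat, a u32 payload
 * length, then a length-prefixed bitmap of the metrics this client can send.
 */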
1211 static int encode_metric_spec(void **p, void *end)
1212 {
1213         static const size_t count = ARRAY_SIZE(metric_bits);
1214
1215         /* header */
1216         if (WARN_ON_ONCE(*p + 2 > end))
1217                 return -ERANGE;
1218
1219         ceph_encode_8(p, 1); /* version */
1220         ceph_encode_8(p, 1); /* compat */
1221
1222         if (count > 0) {
1223                 size_t i;
1224                 size_t size = METRIC_BYTES(count);
1225
1226                 if (WARN_ON_ONCE(*p + 4 + 4 + size > end))
1227                         return -ERANGE;
1228
1229                 /* metric spec info length */
1230                 ceph_encode_32(p, 4 + size);
1231
1232                 /* metric spec */
1233                 ceph_encode_32(p, size);
1234                 memset(*p, 0, size);
1235                 for (i = 0; i < count; i++)
1236                         ((unsigned char *)(*p))[i / 8] |= BIT(metric_bits[i] % 8);
1237                 *p += size;
1238         } else {
1239                 if (WARN_ON_ONCE(*p + 4 + 4 > end))
1240                         return -ERANGE;
1241
1242                 /* metric spec info length */
1243                 ceph_encode_32(p, 4);
1244                 /* metric spec */
1245                 ceph_encode_32(p, 0);
1246         }
1247
1248         return 0;
1249 }
1250
1251 /*
1252  * session message, specialization for CEPH_SESSION_REQUEST_OPEN
1253  * to include additional client metadata fields.
1254  */
1255 static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u64 seq)
1256 {
1257         struct ceph_msg *msg;
1258         struct ceph_mds_session_head *h;
1259         int i = -1;
1260         int extra_bytes = 0;
1261         int metadata_key_count = 0;
1262         struct ceph_options *opt = mdsc->fsc->client->options;
1263         struct ceph_mount_options *fsopt = mdsc->fsc->mount_options;
1264         size_t size, count;
1265         void *p, *end;
1266         int ret;
1267
1268         const char* metadata[][2] = {
1269                 {"hostname", mdsc->nodename},
1270                 {"kernel_version", init_utsname()->release},
1271                 {"entity_id", opt->name ? : ""},
1272                 {"root", fsopt->server_path ? : "/"},
1273                 {NULL, NULL}
1274         };
1275
1276         /* Calculate serialized length of metadata */
1277         extra_bytes = 4;  /* map length */
1278         for (i = 0; metadata[i][0]; ++i) {
1279                 extra_bytes += 8 + strlen(metadata[i][0]) +
1280                         strlen(metadata[i][1]);
1281                 metadata_key_count++;
1282         }
1283
1284         /* supported feature */
1285         size = 0;
1286         count = ARRAY_SIZE(feature_bits);
1287         if (count > 0)
1288                 size = FEATURE_BYTES(count);
1289         extra_bytes += 4 + size;
1290
1291         /* metric spec */
1292         size = 0;
1293         count = ARRAY_SIZE(metric_bits);
1294         if (count > 0)
1295                 size = METRIC_BYTES(count);
1296         extra_bytes += 2 + 4 + 4 + size;
1297
1298         /* Allocate the message */
1299         msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + extra_bytes,
1300                            GFP_NOFS, false);
1301         if (!msg) {
1302                 pr_err("create_session_msg ENOMEM creating msg\n");
1303                 return ERR_PTR(-ENOMEM);
1304         }
1305         p = msg->front.iov_base;
1306         end = p + msg->front.iov_len;
1307
1308         h = p;
1309         h->op = cpu_to_le32(CEPH_SESSION_REQUEST_OPEN);
1310         h->seq = cpu_to_le64(seq);
1311
1312         /*
1313          * Serialize client metadata into waiting buffer space, using
1314          * the format that userspace expects for map<string, string>
1315          *
1316          * ClientSession messages with metadata are v4
1317          */
1318         msg->hdr.version = cpu_to_le16(4);
1319         msg->hdr.compat_version = cpu_to_le16(1);
1320
1321         /* The write pointer, following the session_head structure */
1322         p += sizeof(*h);
1323
1324         /* Number of entries in the map */
1325         ceph_encode_32(&p, metadata_key_count);
1326
1327         /* Two length-prefixed strings for each entry in the map */
1328         for (i = 0; metadata[i][0]; ++i) {
1329                 size_t const key_len = strlen(metadata[i][0]);
1330                 size_t const val_len = strlen(metadata[i][1]);
1331
1332                 ceph_encode_32(&p, key_len);
1333                 memcpy(p, metadata[i][0], key_len);
1334                 p += key_len;
1335                 ceph_encode_32(&p, val_len);
1336                 memcpy(p, metadata[i][1], val_len);
1337                 p += val_len;
1338         }
1339
1340         ret = encode_supported_features(&p, end);
1341         if (ret) {
1342                 pr_err("encode_supported_features failed!\n");
1343                 ceph_msg_put(msg);
1344                 return ERR_PTR(ret);
1345         }
1346
1347         ret = encode_metric_spec(&p, end);
1348         if (ret) {
1349                 pr_err("encode_metric_spec failed!\n");
1350                 ceph_msg_put(msg);
1351                 return ERR_PTR(ret);
1352         }
1353
1354         msg->front.iov_len = p - msg->front.iov_base;
1355         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
1356
1357         return msg;
1358 }
1359
1360 /*
1361  * send session open request.
1362  *
1363  * called under mdsc->mutex
1364  */
1365 static int __open_session(struct ceph_mds_client *mdsc,
1366                           struct ceph_mds_session *session)
1367 {
1368         struct ceph_msg *msg;
1369         int mstate;
1370         int mds = session->s_mds;
1371
1372         /* wait for mds to go active? */
1373         mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
1374         dout("open_session to mds%d (%s)\n", mds,
1375              ceph_mds_state_name(mstate));
1376         session->s_state = CEPH_MDS_SESSION_OPENING;
1377         session->s_renew_requested = jiffies;
1378
1379         /* send connect message */
1380         msg = create_session_open_msg(mdsc, session->s_seq);
1381         if (IS_ERR(msg))
1382                 return PTR_ERR(msg);
1383         ceph_con_send(&session->s_con, msg);
1384         return 0;
1385 }
1386
1387 /*
1388  * open sessions for any export targets for the given mds
1389  *
1390  * called under mdsc->mutex
1391  */
1392 static struct ceph_mds_session *
1393 __open_export_target_session(struct ceph_mds_client *mdsc, int target)
1394 {
1395         struct ceph_mds_session *session;
1396         int ret;
1397
1398         session = __ceph_lookup_mds_session(mdsc, target);
1399         if (!session) {
1400                 session = register_session(mdsc, target);
1401                 if (IS_ERR(session))
1402                         return session;
1403         }
1404         if (session->s_state == CEPH_MDS_SESSION_NEW ||
1405             session->s_state == CEPH_MDS_SESSION_CLOSING) {
1406                 ret = __open_session(mdsc, session);
1407                 if (ret)
1408                         return ERR_PTR(ret);
1409         }
1410
1411         return session;
1412 }
1413
1414 struct ceph_mds_session *
1415 ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target)
1416 {
1417         struct ceph_mds_session *session;
1418
1419         dout("open_export_target_session to mds%d\n", target);
1420
1421         mutex_lock(&mdsc->mutex);
1422         session = __open_export_target_session(mdsc, target);
1423         mutex_unlock(&mdsc->mutex);
1424
1425         return session;
1426 }
1427
1428 static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
1429                                           struct ceph_mds_session *session)
1430 {
1431         struct ceph_mds_info *mi;
1432         struct ceph_mds_session *ts;
1433         int i, mds = session->s_mds;
1434
1435         if (mds >= mdsc->mdsmap->possible_max_rank)
1436                 return;
1437
1438         mi = &mdsc->mdsmap->m_info[mds];
1439         dout("open_export_target_sessions for mds%d (%d targets)\n",
1440              session->s_mds, mi->num_export_targets);
1441
1442         for (i = 0; i < mi->num_export_targets; i++) {
1443                 ts = __open_export_target_session(mdsc, mi->export_targets[i]);
1444                 ceph_put_mds_session(ts);
1445         }
1446 }
1447
1448 void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
1449                                            struct ceph_mds_session *session)
1450 {
1451         mutex_lock(&mdsc->mutex);
1452         __open_export_target_sessions(mdsc, session);
1453         mutex_unlock(&mdsc->mutex);
1454 }
1455
1456 /*
1457  * session caps
1458  */
1459
1460 static void detach_cap_releases(struct ceph_mds_session *session,
1461                                 struct list_head *target)
1462 {
1463         lockdep_assert_held(&session->s_cap_lock);
1464
1465         list_splice_init(&session->s_cap_releases, target);
1466         session->s_num_cap_releases = 0;
1467         dout("dispose_cap_releases mds%d\n", session->s_mds);
1468 }
1469
1470 static void dispose_cap_releases(struct ceph_mds_client *mdsc,
1471                                  struct list_head *dispose)
1472 {
1473         while (!list_empty(dispose)) {
1474                 struct ceph_cap *cap;
1475                 /* unlink each cap and drop its reference */
1476                 cap = list_first_entry(dispose, struct ceph_cap, session_caps);
1477                 list_del(&cap->session_caps);
1478                 ceph_put_cap(mdsc, cap);
1479         }
1480 }
1481
1482 static void cleanup_session_requests(struct ceph_mds_client *mdsc,
1483                                      struct ceph_mds_session *session)
1484 {
1485         struct ceph_mds_request *req;
1486         struct rb_node *p;
1487
1488         dout("cleanup_session_requests mds%d\n", session->s_mds);
1489         mutex_lock(&mdsc->mutex);
1490         while (!list_empty(&session->s_unsafe)) {
1491                 req = list_first_entry(&session->s_unsafe,
1492                                        struct ceph_mds_request, r_unsafe_item);
1493                 pr_warn_ratelimited(" dropping unsafe request %llu\n",
1494                                     req->r_tid);
1495                 if (req->r_target_inode)
1496                         mapping_set_error(req->r_target_inode->i_mapping, -EIO);
1497                 if (req->r_unsafe_dir)
1498                         mapping_set_error(req->r_unsafe_dir->i_mapping, -EIO);
1499                 __unregister_request(mdsc, req);
1500         }
1501         /* zero r_attempts, so kick_requests() will re-send requests */
1502         p = rb_first(&mdsc->request_tree);
1503         while (p) {
1504                 req = rb_entry(p, struct ceph_mds_request, r_node);
1505                 p = rb_next(p);
1506                 if (req->r_session &&
1507                     req->r_session->s_mds == session->s_mds)
1508                         req->r_attempts = 0;
1509         }
1510         mutex_unlock(&mdsc->mutex);
1511 }
1512
1513 /*
1514  * Helper to safely iterate over all caps associated with a session, with
1515  * special care taken to handle a racing __ceph_remove_cap().
1516  *
1517  * Caller must hold session s_mutex.
1518  */
1519 int ceph_iterate_session_caps(struct ceph_mds_session *session,
1520                               int (*cb)(struct inode *, struct ceph_cap *,
1521                                         void *), void *arg)
1522 {
1523         struct list_head *p;
1524         struct ceph_cap *cap;
1525         struct inode *inode, *last_inode = NULL;
1526         struct ceph_cap *old_cap = NULL;
1527         int ret;
1528
1529         dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
1530         spin_lock(&session->s_cap_lock);
1531         p = session->s_caps.next;
1532         while (p != &session->s_caps) {
1533                 cap = list_entry(p, struct ceph_cap, session_caps);
1534                 inode = igrab(&cap->ci->vfs_inode);
1535                 if (!inode) {
1536                         p = p->next;
1537                         continue;
1538                 }
1539                 session->s_cap_iterator = cap;
1540                 spin_unlock(&session->s_cap_lock);
1541
1542                 if (last_inode) {
1543                         /* avoid calling iput_final() while holding
1544                          * s_mutex or in mds dispatch threads */
1545                         ceph_async_iput(last_inode);
1546                         last_inode = NULL;
1547                 }
1548                 if (old_cap) {
1549                         ceph_put_cap(session->s_mdsc, old_cap);
1550                         old_cap = NULL;
1551                 }
1552
1553                 ret = cb(inode, cap, arg);
1554                 last_inode = inode;
1555
1556                 spin_lock(&session->s_cap_lock);
1557                 p = p->next;
1558                 if (!cap->ci) {
1559                         dout("iterate_session_caps  finishing cap %p removal\n",
1560                              cap);
1561                         BUG_ON(cap->session != session);
1562                         cap->session = NULL;
1563                         list_del_init(&cap->session_caps);
1564                         session->s_nr_caps--;
1565                         atomic64_dec(&session->s_mdsc->metric.total_caps);
1566                         if (cap->queue_release)
1567                                 __ceph_queue_cap_release(session, cap);
1568                         else
1569                                 old_cap = cap;  /* put_cap it w/o locks held */
1570                 }
1571                 if (ret < 0)
1572                         goto out;
1573         }
1574         ret = 0;
1575 out:
1576         session->s_cap_iterator = NULL;
1577         spin_unlock(&session->s_cap_lock);
1578
1579         ceph_async_iput(last_inode);
1580         if (old_cap)
1581                 ceph_put_cap(session->s_mdsc, old_cap);
1582
1583         return ret;
1584 }
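
/*
 * Example (illustrative sketch only, not used by the driver): a callback
 * passed to ceph_iterate_session_caps() is invoked with an igrab()'d inode
 * and the cap, runs without s_cap_lock held, and returns 0 to continue or
 * a negative value to stop the walk early.  The iterator drops its own
 * inode reference afterwards via ceph_async_iput(), so the callback must
 * not.  A hypothetical counting callback could look like:
 *
 *	static int count_caps_cb(struct inode *inode, struct ceph_cap *cap,
 *				 void *arg)
 *	{
 *		int *count = arg;
 *
 *		(*count)++;
 *		return 0;
 *	}
 *
 * The real callbacks in this file (remove_session_caps_cb,
 * wake_up_session_cb, trim_caps_cb) follow the same contract.
 */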
1585
1586 static int remove_capsnaps(struct ceph_mds_client *mdsc, struct inode *inode)
1587 {
1588         struct ceph_inode_info *ci = ceph_inode(inode);
1589         struct ceph_cap_snap *capsnap;
1590         int capsnap_release = 0;
1591
1592         lockdep_assert_held(&ci->i_ceph_lock);
1593
1594         dout("removing capsnaps, ci is %p, inode is %p\n", ci, inode);
1595
1596         while (!list_empty(&ci->i_cap_snaps)) {
1597                 capsnap = list_first_entry(&ci->i_cap_snaps,
1598                                            struct ceph_cap_snap, ci_item);
1599                 __ceph_remove_capsnap(inode, capsnap, NULL, NULL);
1600                 ceph_put_snap_context(capsnap->context);
1601                 ceph_put_cap_snap(capsnap);
1602                 capsnap_release++;
1603         }
1604         wake_up_all(&ci->i_cap_wq);
1605         wake_up_all(&mdsc->cap_flushing_wq);
1606         return capsnap_release;
1607 }
1608
1609 static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
1610                                   void *arg)
1611 {
1612         struct ceph_fs_client *fsc = (struct ceph_fs_client *)arg;
1613         struct ceph_mds_client *mdsc = fsc->mdsc;
1614         struct ceph_inode_info *ci = ceph_inode(inode);
1615         LIST_HEAD(to_remove);
1616         bool dirty_dropped = false;
1617         bool invalidate = false;
1618         int capsnap_release = 0;
1619
1620         dout("removing cap %p, ci is %p, inode is %p\n",
1621              cap, ci, &ci->vfs_inode);
1622         spin_lock(&ci->i_ceph_lock);
1623         __ceph_remove_cap(cap, false);
1624         if (!ci->i_auth_cap) {
1625                 struct ceph_cap_flush *cf;
1626
1627                 if (READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
1628                         if (inode->i_data.nrpages > 0)
1629                                 invalidate = true;
1630                         if (ci->i_wrbuffer_ref > 0)
1631                                 mapping_set_error(&inode->i_data, -EIO);
1632                 }
1633
1634                 while (!list_empty(&ci->i_cap_flush_list)) {
1635                         cf = list_first_entry(&ci->i_cap_flush_list,
1636                                               struct ceph_cap_flush, i_list);
1637                         list_move(&cf->i_list, &to_remove);
1638                 }
1639
1640                 spin_lock(&mdsc->cap_dirty_lock);
1641
1642                 list_for_each_entry(cf, &to_remove, i_list)
1643                         list_del_init(&cf->g_list);
1644
1645                 if (!list_empty(&ci->i_dirty_item)) {
1646                         pr_warn_ratelimited(
1647                                 " dropping dirty %s state for %p %lld\n",
1648                                 ceph_cap_string(ci->i_dirty_caps),
1649                                 inode, ceph_ino(inode));
1650                         ci->i_dirty_caps = 0;
1651                         list_del_init(&ci->i_dirty_item);
1652                         dirty_dropped = true;
1653                 }
1654                 if (!list_empty(&ci->i_flushing_item)) {
1655                         pr_warn_ratelimited(
1656                                 " dropping dirty+flushing %s state for %p %lld\n",
1657                                 ceph_cap_string(ci->i_flushing_caps),
1658                                 inode, ceph_ino(inode));
1659                         ci->i_flushing_caps = 0;
1660                         list_del_init(&ci->i_flushing_item);
1661                         mdsc->num_cap_flushing--;
1662                         dirty_dropped = true;
1663                 }
1664                 spin_unlock(&mdsc->cap_dirty_lock);
1665
1666                 if (dirty_dropped) {
1667                         mapping_set_error(inode->i_mapping, -EIO);
1668
1669                         if (ci->i_wrbuffer_ref_head == 0 &&
1670                             ci->i_wr_ref == 0 &&
1671                             ci->i_dirty_caps == 0 &&
1672                             ci->i_flushing_caps == 0) {
1673                                 ceph_put_snap_context(ci->i_head_snapc);
1674                                 ci->i_head_snapc = NULL;
1675                         }
1676                 }
1677
1678                 if (atomic_read(&ci->i_filelock_ref) > 0) {
1679                         /* make further file lock syscalls return -EIO */
1680                         ci->i_ceph_flags |= CEPH_I_ERROR_FILELOCK;
1681                         pr_warn_ratelimited(" dropping file locks for %p %lld\n",
1682                                             inode, ceph_ino(inode));
1683                 }
1684
1685                 if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) {
1686                         list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
1687                         ci->i_prealloc_cap_flush = NULL;
1688                 }
1689
1690                 if (!list_empty(&ci->i_cap_snaps))
1691                         capsnap_release = remove_capsnaps(mdsc, inode);
1692         }
1693         spin_unlock(&ci->i_ceph_lock);
1694         while (!list_empty(&to_remove)) {
1695                 struct ceph_cap_flush *cf;
1696                 cf = list_first_entry(&to_remove,
1697                                       struct ceph_cap_flush, i_list);
1698                 list_del_init(&cf->i_list);
1699                 if (!cf->is_capsnap)
1700                         ceph_free_cap_flush(cf);
1701         }
1702
1703         wake_up_all(&ci->i_cap_wq);
1704         if (invalidate)
1705                 ceph_queue_invalidate(inode);
1706         if (dirty_dropped)
1707                 iput(inode);
1708         while (capsnap_release--)
1709                 iput(inode);
1710         return 0;
1711 }
1712
1713 /*
1714  * caller must hold session s_mutex
1715  */
1716 static void remove_session_caps(struct ceph_mds_session *session)
1717 {
1718         struct ceph_fs_client *fsc = session->s_mdsc->fsc;
1719         struct super_block *sb = fsc->sb;
1720         LIST_HEAD(dispose);
1721
1722         dout("remove_session_caps on %p\n", session);
1723         ceph_iterate_session_caps(session, remove_session_caps_cb, fsc);
1724
1725         wake_up_all(&fsc->mdsc->cap_flushing_wq);
1726
1727         spin_lock(&session->s_cap_lock);
1728         if (session->s_nr_caps > 0) {
1729                 struct inode *inode;
1730                 struct ceph_cap *cap, *prev = NULL;
1731                 struct ceph_vino vino;
1732                 /*
1733                  * iterate_session_caps() skips inodes that are being
1734                  * deleted; we need to wait until deletions are complete.
1735                  * __wait_on_freeing_inode() is designed for the job, but
1736                  * it is not exported, so use the inode lookup path
1737                  * (ceph_find_inode) to reach it.
1738                  */
1739                 while (!list_empty(&session->s_caps)) {
1740                         cap = list_entry(session->s_caps.next,
1741                                          struct ceph_cap, session_caps);
1742                         if (cap == prev)
1743                                 break;
1744                         prev = cap;
1745                         vino = cap->ci->i_vino;
1746                         spin_unlock(&session->s_cap_lock);
1747
1748                         inode = ceph_find_inode(sb, vino);
1749                          /* avoid calling iput_final() while holding s_mutex */
1750                         ceph_async_iput(inode);
1751
1752                         spin_lock(&session->s_cap_lock);
1753                 }
1754         }
1755
1756         /* detach queued cap releases; they are disposed of below */
1757         detach_cap_releases(session, &dispose);
1758
1759         BUG_ON(session->s_nr_caps > 0);
1760         BUG_ON(!list_empty(&session->s_cap_flushing));
1761         spin_unlock(&session->s_cap_lock);
1762         dispose_cap_releases(session->s_mdsc, &dispose);
1763 }
1764
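/*
 * Events passed to wake_up_session_caps() via wake_up_session_cb() below:
 * RECONNECT clears the wanted/requested max size so it gets re-requested,
 * RENEWCAPS downgrades to CEPH_CAP_PIN any cap whose generation was not
 * renewed by the MDS, and FORCE_RO simply wakes waiters on i_cap_wq.
 */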
1765 enum {
1766         RECONNECT,
1767         RENEWCAPS,
1768         FORCE_RO,
1769 };
1770
1771 /*
1772  * wake up any threads waiting on this session's caps.  if the cap is
1773  * old (didn't get renewed on the client reconnect), remove it now.
1774  *
1775  * caller must hold s_mutex.
1776  */
1777 static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
1778                               void *arg)
1779 {
1780         struct ceph_inode_info *ci = ceph_inode(inode);
1781         unsigned long ev = (unsigned long)arg;
1782
1783         if (ev == RECONNECT) {
1784                 spin_lock(&ci->i_ceph_lock);
1785                 ci->i_wanted_max_size = 0;
1786                 ci->i_requested_max_size = 0;
1787                 spin_unlock(&ci->i_ceph_lock);
1788         } else if (ev == RENEWCAPS) {
1789                 if (cap->cap_gen < cap->session->s_cap_gen) {
1790                         /* mds did not re-issue stale cap */
1791                         spin_lock(&ci->i_ceph_lock);
1792                         cap->issued = cap->implemented = CEPH_CAP_PIN;
1793                         spin_unlock(&ci->i_ceph_lock);
1794                 }
1795         } else if (ev == FORCE_RO) {
1796         }
1797         wake_up_all(&ci->i_cap_wq);
1798         return 0;
1799 }
1800
1801 static void wake_up_session_caps(struct ceph_mds_session *session, int ev)
1802 {
1803         dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
1804         ceph_iterate_session_caps(session, wake_up_session_cb,
1805                                   (void *)(unsigned long)ev);
1806 }
1807
1808 /*
1809  * Send periodic message to MDS renewing all currently held caps.  The
1810  * ack will reset the expiration for all caps from this session.
1811  *
1812  * caller holds s_mutex
1813  */
1814 static int send_renew_caps(struct ceph_mds_client *mdsc,
1815                            struct ceph_mds_session *session)
1816 {
1817         struct ceph_msg *msg;
1818         int state;
1819
1820         if (time_after_eq(jiffies, session->s_cap_ttl) &&
1821             time_after_eq(session->s_cap_ttl, session->s_renew_requested))
1822                 pr_info("mds%d caps stale\n", session->s_mds);
1823         session->s_renew_requested = jiffies;
1824
1825         /* do not try to renew caps until a recovering mds has reconnected
1826          * with its clients. */
1827         state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
1828         if (state < CEPH_MDS_STATE_RECONNECT) {
1829                 dout("send_renew_caps ignoring mds%d (%s)\n",
1830                      session->s_mds, ceph_mds_state_name(state));
1831                 return 0;
1832         }
1833
1834         dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
1835                 ceph_mds_state_name(state));
1836         msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
1837                                  ++session->s_renew_seq);
1838         if (!msg)
1839                 return -ENOMEM;
1840         ceph_con_send(&session->s_con, msg);
1841         return 0;
1842 }
1843
1844 static int send_flushmsg_ack(struct ceph_mds_client *mdsc,
1845                              struct ceph_mds_session *session, u64 seq)
1846 {
1847         struct ceph_msg *msg;
1848
1849         dout("send_flushmsg_ack to mds%d (%s) seq %lld\n",
1850              session->s_mds, ceph_session_state_name(session->s_state), seq);
1851         msg = create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq);
1852         if (!msg)
1853                 return -ENOMEM;
1854         ceph_con_send(&session->s_con, msg);
1855         return 0;
1856 }
1857
1858
1859 /*
1860  * Note new cap ttl, and any transition from stale -> not stale (fresh?).
1861  *
1862  * Called under session->s_mutex
1863  */
1864 static void renewed_caps(struct ceph_mds_client *mdsc,
1865                          struct ceph_mds_session *session, int is_renew)
1866 {
1867         int was_stale;
1868         int wake = 0;
1869
1870         spin_lock(&session->s_cap_lock);
1871         was_stale = is_renew && time_after_eq(jiffies, session->s_cap_ttl);
1872
1873         session->s_cap_ttl = session->s_renew_requested +
1874                 mdsc->mdsmap->m_session_timeout*HZ;
1875
1876         if (was_stale) {
1877                 if (time_before(jiffies, session->s_cap_ttl)) {
1878                         pr_info("mds%d caps renewed\n", session->s_mds);
1879                         wake = 1;
1880                 } else {
1881                         pr_info("mds%d caps still stale\n", session->s_mds);
1882                 }
1883         }
1884         dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
1885              session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
1886              time_before(jiffies, session->s_cap_ttl) ? "fresh" : "stale");
1887         spin_unlock(&session->s_cap_lock);
1888
1889         if (wake)
1890                 wake_up_session_caps(session, RENEWCAPS);
1891 }
1892
1893 /*
1894  * send a session close request
1895  */
1896 static int request_close_session(struct ceph_mds_session *session)
1897 {
1898         struct ceph_msg *msg;
1899
1900         dout("request_close_session mds%d state %s seq %lld\n",
1901              session->s_mds, ceph_session_state_name(session->s_state),
1902              session->s_seq);
1903         msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq);
1904         if (!msg)
1905                 return -ENOMEM;
1906         ceph_con_send(&session->s_con, msg);
1907         return 1;
1908 }
1909
1910 /*
1911  * Called with s_mutex held.
1912  */
1913 static int __close_session(struct ceph_mds_client *mdsc,
1914                          struct ceph_mds_session *session)
1915 {
1916         if (session->s_state >= CEPH_MDS_SESSION_CLOSING)
1917                 return 0;
1918         session->s_state = CEPH_MDS_SESSION_CLOSING;
1919         return request_close_session(session);
1920 }
1921
1922 static bool drop_negative_children(struct dentry *dentry)
1923 {
1924         struct dentry *child;
1925         bool all_negative = true;
1926
1927         if (!d_is_dir(dentry))
1928                 goto out;
1929
1930         spin_lock(&dentry->d_lock);
1931         list_for_each_entry(child, &dentry->d_subdirs, d_child) {
1932                 if (d_really_is_positive(child)) {
1933                         all_negative = false;
1934                         break;
1935                 }
1936         }
1937         spin_unlock(&dentry->d_lock);
1938
1939         if (all_negative)
1940                 shrink_dcache_parent(dentry);
1941 out:
1942         return all_negative;
1943 }
1944
1945 /*
1946  * Trim old(er) caps.
1947  *
1948  * Because we can't cache an inode without one or more caps, we do
1949  * this indirectly: if a cap is unused, we prune its aliases, at which
1950  * point the inode will hopefully get dropped too.
1951  *
1952  * Yes, this is a bit sloppy.  Our only real goal here is to respond to
1953  * memory pressure from the MDS, though, so it needn't be perfect.
1954  */
1955 static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
1956 {
1957         int *remaining = arg;
1958         struct ceph_inode_info *ci = ceph_inode(inode);
1959         int used, wanted, oissued, mine;
1960
1961         if (*remaining <= 0)
1962                 return -1;
1963
1964         spin_lock(&ci->i_ceph_lock);
1965         mine = cap->issued | cap->implemented;
1966         used = __ceph_caps_used(ci);
1967         wanted = __ceph_caps_file_wanted(ci);
1968         oissued = __ceph_caps_issued_other(ci, cap);
1969
1970         dout("trim_caps_cb %p cap %p mine %s oissued %s used %s wanted %s\n",
1971              inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
1972              ceph_cap_string(used), ceph_cap_string(wanted));
1973         if (cap == ci->i_auth_cap) {
1974                 if (ci->i_dirty_caps || ci->i_flushing_caps ||
1975                     !list_empty(&ci->i_cap_snaps))
1976                         goto out;
1977                 if ((used | wanted) & CEPH_CAP_ANY_WR)
1978                         goto out;
1979                 /* Note: it's possible that i_filelock_ref becomes non-zero
1980                  * after dropping auth caps. It doesn't hurt because the reply
1981                  * to the lock MDS request will re-add auth caps. */
1982                 if (atomic_read(&ci->i_filelock_ref) > 0)
1983                         goto out;
1984         }
1985         /* The inode has cached pages, but it's no longer used.
1986          * we can safely drop it */
1987         if (S_ISREG(inode->i_mode) &&
1988             wanted == 0 && used == CEPH_CAP_FILE_CACHE &&
1989             !(oissued & CEPH_CAP_FILE_CACHE)) {
1990                 used = 0;
1991                 oissued = 0;
1992         }
1993         if ((used | wanted) & ~oissued & mine)
1994                 goto out;   /* we need these caps */
1995
1996         if (oissued) {
1997                 /* we aren't the only cap.. just remove us */
1998                 __ceph_remove_cap(cap, true);
1999                 (*remaining)--;
2000         } else {
2001                 struct dentry *dentry;
2002                 /* try dropping referring dentries */
2003                 spin_unlock(&ci->i_ceph_lock);
2004                 dentry = d_find_any_alias(inode);
2005                 if (dentry && drop_negative_children(dentry)) {
2006                         int count;
2007                         dput(dentry);
2008                         d_prune_aliases(inode);
2009                         count = atomic_read(&inode->i_count);
2010                         if (count == 1)
2011                                 (*remaining)--;
2012                         dout("trim_caps_cb %p cap %p pruned, count now %d\n",
2013                              inode, cap, count);
2014                 } else {
2015                         dput(dentry);
2016                 }
2017                 return 0;
2018         }
2019
2020 out:
2021         spin_unlock(&ci->i_ceph_lock);
2022         return 0;
2023 }
2024
2025 /*
2026  * Trim session cap count down to some max number.
2027  */
2028 int ceph_trim_caps(struct ceph_mds_client *mdsc,
2029                    struct ceph_mds_session *session,
2030                    int max_caps)
2031 {
2032         int trim_caps = session->s_nr_caps - max_caps;
2033
2034         dout("trim_caps mds%d start: %d / %d, trim %d\n",
2035              session->s_mds, session->s_nr_caps, max_caps, trim_caps);
2036         if (trim_caps > 0) {
2037                 int remaining = trim_caps;
2038
2039                 ceph_iterate_session_caps(session, trim_caps_cb, &remaining);
2040                 dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
2041                      session->s_mds, session->s_nr_caps, max_caps,
2042                         trim_caps - remaining);
2043         }
2044
2045         ceph_flush_cap_releases(mdsc, session);
2046         return 0;
2047 }
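
/*
 * Worked example of the trim target above (numbers are hypothetical):
 * with s_nr_caps == 5000 and an MDS-requested max_caps of 4096, trim_caps
 * is 904; trim_caps_cb() is then applied to the session's caps until
 * `remaining` hits zero or the cap list is exhausted, and any queued
 * releases are pushed out via ceph_flush_cap_releases().
 */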
2048
2049 static int check_caps_flush(struct ceph_mds_client *mdsc,
2050                             u64 want_flush_tid)
2051 {
2052         int ret = 1;
2053
2054         spin_lock(&mdsc->cap_dirty_lock);
2055         if (!list_empty(&mdsc->cap_flush_list)) {
2056                 struct ceph_cap_flush *cf =
2057                         list_first_entry(&mdsc->cap_flush_list,
2058                                          struct ceph_cap_flush, g_list);
2059                 if (cf->tid <= want_flush_tid) {
2060                         dout("check_caps_flush still flushing tid "
2061                              "%llu <= %llu\n", cf->tid, want_flush_tid);
2062                         ret = 0;
2063                 }
2064         }
2065         spin_unlock(&mdsc->cap_dirty_lock);
2066         return ret;
2067 }
2068
2069 /*
2070  * wait for outstanding cap flushes to complete.
2071  *
2072  * returns once we've flushed through want_flush_tid
2073  */
2074 static void wait_caps_flush(struct ceph_mds_client *mdsc,
2075                             u64 want_flush_tid)
2076 {
2077         dout("check_caps_flush want %llu\n", want_flush_tid);
2078
2079         wait_event(mdsc->cap_flushing_wq,
2080                    check_caps_flush(mdsc, want_flush_tid));
2081
2082         dout("check_caps_flush ok, flushed thru %llu\n", want_flush_tid);
2083 }
2084
2085 /*
2086  * called under s_mutex
2087  */
2088 static void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
2089                                    struct ceph_mds_session *session)
2090 {
2091         struct ceph_msg *msg = NULL;
2092         struct ceph_mds_cap_release *head;
2093         struct ceph_mds_cap_item *item;
2094         struct ceph_osd_client *osdc = &mdsc->fsc->client->osdc;
2095         struct ceph_cap *cap;
2096         LIST_HEAD(tmp_list);
2097         int num_cap_releases;
2098         __le32  barrier, *cap_barrier;
2099
2100         down_read(&osdc->lock);
2101         barrier = cpu_to_le32(osdc->epoch_barrier);
2102         up_read(&osdc->lock);
2103
2104         spin_lock(&session->s_cap_lock);
2105 again:
2106         list_splice_init(&session->s_cap_releases, &tmp_list);
2107         num_cap_releases = session->s_num_cap_releases;
2108         session->s_num_cap_releases = 0;
2109         spin_unlock(&session->s_cap_lock);
2110
2111         while (!list_empty(&tmp_list)) {
2112                 if (!msg) {
2113                         msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE,
2114                                         PAGE_SIZE, GFP_NOFS, false);
2115                         if (!msg)
2116                                 goto out_err;
2117                         head = msg->front.iov_base;
2118                         head->num = cpu_to_le32(0);
2119                         msg->front.iov_len = sizeof(*head);
2120
2121                         msg->hdr.version = cpu_to_le16(2);
2122                         msg->hdr.compat_version = cpu_to_le16(1);
2123                 }
2124
2125                 cap = list_first_entry(&tmp_list, struct ceph_cap,
2126                                         session_caps);
2127                 list_del(&cap->session_caps);
2128                 num_cap_releases--;
2129
2130                 head = msg->front.iov_base;
2131                 put_unaligned_le32(get_unaligned_le32(&head->num) + 1,
2132                                    &head->num);
2133                 item = msg->front.iov_base + msg->front.iov_len;
2134                 item->ino = cpu_to_le64(cap->cap_ino);
2135                 item->cap_id = cpu_to_le64(cap->cap_id);
2136                 item->migrate_seq = cpu_to_le32(cap->mseq);
2137                 item->seq = cpu_to_le32(cap->issue_seq);
2138                 msg->front.iov_len += sizeof(*item);
2139
2140                 ceph_put_cap(mdsc, cap);
2141
2142                 if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
2143                         /* Append cap_barrier field */
2144                         cap_barrier = msg->front.iov_base + msg->front.iov_len;
2145                         *cap_barrier = barrier;
2146                         msg->front.iov_len += sizeof(*cap_barrier);
2147
2148                         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2149                         dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
2150                         ceph_con_send(&session->s_con, msg);
2151                         msg = NULL;
2152                 }
2153         }
2154
2155         BUG_ON(num_cap_releases != 0);
2156
2157         spin_lock(&session->s_cap_lock);
2158         if (!list_empty(&session->s_cap_releases))
2159                 goto again;
2160         spin_unlock(&session->s_cap_lock);
2161
2162         if (msg) {
2163                 /* Append cap_barrier field */
2164                 cap_barrier = msg->front.iov_base + msg->front.iov_len;
2165                 *cap_barrier = barrier;
2166                 msg->front.iov_len += sizeof(*cap_barrier);
2167
2168                 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2169                 dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
2170                 ceph_con_send(&session->s_con, msg);
2171         }
2172         return;
2173 out_err:
2174         pr_err("send_cap_releases mds%d, failed to allocate message\n",
2175                 session->s_mds);
2176         spin_lock(&session->s_cap_lock);
2177         list_splice(&tmp_list, &session->s_cap_releases);
2178         session->s_num_cap_releases += num_cap_releases;
2179         spin_unlock(&session->s_cap_lock);
2180 }
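
/*
 * Wire layout produced above (v2 CAPRELEASE message, compat v1): a
 * ceph_mds_cap_release header whose `num` field counts the
 * ceph_mds_cap_item entries that follow, then the items themselves, then
 * a trailing __le32 osdmap epoch barrier.  A message is flushed and a new
 * one started whenever `num` reaches CEPH_CAPS_PER_RELEASE.
 */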
2181
2182 static void ceph_cap_release_work(struct work_struct *work)
2183 {
2184         struct ceph_mds_session *session =
2185                 container_of(work, struct ceph_mds_session, s_cap_release_work);
2186
2187         mutex_lock(&session->s_mutex);
2188         if (session->s_state == CEPH_MDS_SESSION_OPEN ||
2189             session->s_state == CEPH_MDS_SESSION_HUNG)
2190                 ceph_send_cap_releases(session->s_mdsc, session);
2191         mutex_unlock(&session->s_mutex);
2192         ceph_put_mds_session(session);
2193 }
2194
2195 void ceph_flush_cap_releases(struct ceph_mds_client *mdsc,
2196                              struct ceph_mds_session *session)
2197 {
2198         if (mdsc->stopping)
2199                 return;
2200
2201         ceph_get_mds_session(session);
2202         if (queue_work(mdsc->fsc->cap_wq,
2203                        &session->s_cap_release_work)) {
2204                 dout("cap release work queued\n");
2205         } else {
2206                 ceph_put_mds_session(session);
2207                 dout("failed to queue cap release work\n");
2208         }
2209 }
2210
2211 /*
2212  * caller holds session->s_cap_lock
2213  */
2214 void __ceph_queue_cap_release(struct ceph_mds_session *session,
2215                               struct ceph_cap *cap)
2216 {
2217         list_add_tail(&cap->session_caps, &session->s_cap_releases);
2218         session->s_num_cap_releases++;
2219
2220         if (!(session->s_num_cap_releases % CEPH_CAPS_PER_RELEASE))
2221                 ceph_flush_cap_releases(session->s_mdsc, session);
2222 }
2223
2224 static void ceph_cap_reclaim_work(struct work_struct *work)
2225 {
2226         struct ceph_mds_client *mdsc =
2227                 container_of(work, struct ceph_mds_client, cap_reclaim_work);
2228         int ret = ceph_trim_dentries(mdsc);
2229         if (ret == -EAGAIN)
2230                 ceph_queue_cap_reclaim_work(mdsc);
2231 }
2232
2233 void ceph_queue_cap_reclaim_work(struct ceph_mds_client *mdsc)
2234 {
2235         if (mdsc->stopping)
2236                 return;
2237
2238         if (queue_work(mdsc->fsc->cap_wq, &mdsc->cap_reclaim_work)) {
2239                 dout("caps reclaim work queued\n");
2240         } else {
2241                 dout("failed to queue caps reclaim work\n");
2242         }
2243 }
2244
2245 void ceph_reclaim_caps_nr(struct ceph_mds_client *mdsc, int nr)
2246 {
2247         int val;
2248         if (!nr)
2249                 return;
2250         val = atomic_add_return(nr, &mdsc->cap_reclaim_pending);
2251         if ((val % CEPH_CAPS_PER_RELEASE) < nr) {
2252                 atomic_set(&mdsc->cap_reclaim_pending, 0);
2253                 ceph_queue_cap_reclaim_work(mdsc);
2254         }
2255 }
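
/*
 * Sketch of the batching check above, assuming a hypothetical
 * CEPH_CAPS_PER_RELEASE of 10: successive calls with nr == 4 move
 * cap_reclaim_pending through 4, 8, 12, ...; (val % CEPH_CAPS_PER_RELEASE)
 * < nr first holds at val == 12 (12 % 10 == 2 < 4), so the reclaim work is
 * queued roughly once per CEPH_CAPS_PER_RELEASE accumulated caps rather
 * than on every call, after which the pending counter is reset.
 */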
2256
2257 /*
2258  * requests
2259  */
2260
2261 int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
2262                                     struct inode *dir)
2263 {
2264         struct ceph_inode_info *ci = ceph_inode(dir);
2265         struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
2266         struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options;
2267         size_t size = sizeof(struct ceph_mds_reply_dir_entry);
2268         unsigned int num_entries;
2269         int order;
2270
2271         spin_lock(&ci->i_ceph_lock);
2272         num_entries = ci->i_files + ci->i_subdirs;
2273         spin_unlock(&ci->i_ceph_lock);
2274         num_entries = max(num_entries, 1U);
2275         num_entries = min(num_entries, opt->max_readdir);
2276
2277         order = get_order(size * num_entries);
2278         while (order >= 0) {
2279                 rinfo->dir_entries = (void*)__get_free_pages(GFP_KERNEL |
2280                                                              __GFP_NOWARN,
2281                                                              order);
2282                 if (rinfo->dir_entries)
2283                         break;
2284                 order--;
2285         }
2286         if (!rinfo->dir_entries)
2287                 return -ENOMEM;
2288
2289         num_entries = (PAGE_SIZE << order) / size;
2290         num_entries = min(num_entries, opt->max_readdir);
2291
2292         rinfo->dir_buf_size = PAGE_SIZE << order;
2293         req->r_num_caps = num_entries + 1;
2294         req->r_args.readdir.max_entries = cpu_to_le32(num_entries);
2295         req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes);
2296         return 0;
2297 }
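
/*
 * Example of the sizing logic above (hypothetical numbers): if the
 * directory reports i_files + i_subdirs == 300 and each
 * ceph_mds_reply_dir_entry were 40 bytes, get_order(300 * 40) requests a
 * 16 KiB (order-2) allocation; if that fails under memory pressure the
 * order is stepped down until a smaller buffer succeeds, and max_entries
 * is then recomputed from the buffer actually obtained (still capped by
 * the max_readdir mount option).
 */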
2298
2299 /*
2300  * Create an mds request.
2301  */
2302 struct ceph_mds_request *
2303 ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
2304 {
2305         struct ceph_mds_request *req;
2306
2307         req = kmem_cache_zalloc(ceph_mds_request_cachep, GFP_NOFS);
2308         if (!req)
2309                 return ERR_PTR(-ENOMEM);
2310
2311         mutex_init(&req->r_fill_mutex);
2312         req->r_mdsc = mdsc;
2313         req->r_started = jiffies;
2314         req->r_start_latency = ktime_get();
2315         req->r_resend_mds = -1;
2316         INIT_LIST_HEAD(&req->r_unsafe_dir_item);
2317         INIT_LIST_HEAD(&req->r_unsafe_target_item);
2318         req->r_fmode = -1;
2319         kref_init(&req->r_kref);
2320         RB_CLEAR_NODE(&req->r_node);
2321         INIT_LIST_HEAD(&req->r_wait);
2322         init_completion(&req->r_completion);
2323         init_completion(&req->r_safe_completion);
2324         INIT_LIST_HEAD(&req->r_unsafe_item);
2325
2326         ktime_get_coarse_real_ts64(&req->r_stamp);
2327
2328         req->r_op = op;
2329         req->r_direct_mode = mode;
2330         return req;
2331 }
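
/*
 * Typical caller pattern (sketch of how fs/ceph callers use this API):
 * allocate the request, fill in the r_* fields describing the operation,
 * hand it to ceph_mdsc_do_request(), then drop the kref:
 *
 *	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUP, USE_ANY_MDS);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	req->r_dentry = dget(dentry);
 *	req->r_num_caps = 2;
 *	err = ceph_mdsc_do_request(mdsc, dir, req);
 *	ceph_mdsc_put_request(req);
 */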
2332
2333 /*
2334  * return oldest (lowest tid) request in the request tree, or NULL if none.
2335  *
2336  * called under mdsc->mutex.
2337  */
2338 static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc)
2339 {
2340         if (RB_EMPTY_ROOT(&mdsc->request_tree))
2341                 return NULL;
2342         return rb_entry(rb_first(&mdsc->request_tree),
2343                         struct ceph_mds_request, r_node);
2344 }
2345
2346 static inline u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
2347 {
2348         return mdsc->oldest_tid;
2349 }
2350
2351 /*
2352  * Build a dentry's path.  Allocated via __getname(); free with
2353  * ceph_mdsc_free_path().  Based on build_path_from_dentry in fs/cifs/dir.c.
2354  *
2355  * If @stop_on_nosnap, generate path relative to the first non-snapped
2356  * inode.
2357  *
2358  * Encode hidden .snap dirs as a double /, i.e.
2359  *   foo/.snap/bar -> foo//bar
2360  */
2361 char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *pbase,
2362                            int stop_on_nosnap)
2363 {
2364         struct dentry *temp;
2365         char *path;
2366         int pos;
2367         unsigned seq;
2368         u64 base;
2369
2370         if (!dentry)
2371                 return ERR_PTR(-EINVAL);
2372
2373         path = __getname();
2374         if (!path)
2375                 return ERR_PTR(-ENOMEM);
2376 retry:
2377         pos = PATH_MAX - 1;
2378         path[pos] = '\0';
2379
2380         seq = read_seqbegin(&rename_lock);
2381         rcu_read_lock();
2382         temp = dentry;
2383         for (;;) {
2384                 struct inode *inode;
2385
2386                 spin_lock(&temp->d_lock);
2387                 inode = d_inode(temp);
2388                 if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
2389                         dout("build_path path+%d: %p SNAPDIR\n",
2390                              pos, temp);
2391                 } else if (stop_on_nosnap && inode && dentry != temp &&
2392                            ceph_snap(inode) == CEPH_NOSNAP) {
2393                         spin_unlock(&temp->d_lock);
2394                         pos++; /* get rid of any prepended '/' */
2395                         break;
2396                 } else {
2397                         pos -= temp->d_name.len;
2398                         if (pos < 0) {
2399                                 spin_unlock(&temp->d_lock);
2400                                 break;
2401                         }
2402                         memcpy(path + pos, temp->d_name.name, temp->d_name.len);
2403                 }
2404                 spin_unlock(&temp->d_lock);
2405                 temp = READ_ONCE(temp->d_parent);
2406
2407                 /* Are we at the root? */
2408                 if (IS_ROOT(temp))
2409                         break;
2410
2411                 /* Are we out of buffer? */
2412                 if (--pos < 0)
2413                         break;
2414
2415                 path[pos] = '/';
2416         }
2417         base = ceph_ino(d_inode(temp));
2418         rcu_read_unlock();
2419
2420         if (read_seqretry(&rename_lock, seq))
2421                 goto retry;
2422
2423         if (pos < 0) {
2424                 /*
2425                  * A rename didn't occur, but somehow we didn't end up where
2426                  * we thought we would. Throw a warning and try again.
2427                  */
2428                 pr_warn("build_path did not end path lookup where "
2429                         "expected, pos is %d\n", pos);
2430                 goto retry;
2431         }
2432
2433         *pbase = base;
2434         *plen = PATH_MAX - 1 - pos;
2435         dout("build_path on %p %d built %llx '%.*s'\n",
2436              dentry, d_count(dentry), base, *plen, path + pos);
2437         return path + pos;
2438 }
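
/*
 * Usage note: the returned pointer is into the middle of a PATH_MAX buffer
 * obtained with __getname(), so it cannot be handed to kfree() directly;
 * callers release it with ceph_mdsc_free_path(path, plen) once the path
 * has been encoded, as create_request_message() below does for path1 and
 * path2.
 */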
2439
2440 static int build_dentry_path(struct dentry *dentry, struct inode *dir,
2441                              const char **ppath, int *ppathlen, u64 *pino,
2442                              bool *pfreepath, bool parent_locked)
2443 {
2444         char *path;
2445
2446         rcu_read_lock();
2447         if (!dir)
2448                 dir = d_inode_rcu(dentry->d_parent);
2449         if (dir && parent_locked && ceph_snap(dir) == CEPH_NOSNAP) {
2450                 *pino = ceph_ino(dir);
2451                 rcu_read_unlock();
2452                 *ppath = dentry->d_name.name;
2453                 *ppathlen = dentry->d_name.len;
2454                 return 0;
2455         }
2456         rcu_read_unlock();
2457         path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
2458         if (IS_ERR(path))
2459                 return PTR_ERR(path);
2460         *ppath = path;
2461         *pfreepath = true;
2462         return 0;
2463 }
2464
2465 static int build_inode_path(struct inode *inode,
2466                             const char **ppath, int *ppathlen, u64 *pino,
2467                             bool *pfreepath)
2468 {
2469         struct dentry *dentry;
2470         char *path;
2471
2472         if (ceph_snap(inode) == CEPH_NOSNAP) {
2473                 *pino = ceph_ino(inode);
2474                 *ppathlen = 0;
2475                 return 0;
2476         }
2477         dentry = d_find_alias(inode);
2478         path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
2479         dput(dentry);
2480         if (IS_ERR(path))
2481                 return PTR_ERR(path);
2482         *ppath = path;
2483         *pfreepath = true;
2484         return 0;
2485 }
2486
2487 /*
2488  * request arguments may be specified via an inode *, a dentry *, or
2489  * an explicit ino+path.
2490  */
2491 static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
2492                                   struct inode *rdiri, const char *rpath,
2493                                   u64 rino, const char **ppath, int *pathlen,
2494                                   u64 *ino, bool *freepath, bool parent_locked)
2495 {
2496         int r = 0;
2497
2498         if (rinode) {
2499                 r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
2500                 dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
2501                      ceph_snap(rinode));
2502         } else if (rdentry) {
2503                 r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino,
2504                                         freepath, parent_locked);
2505                 dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
2506                      *ppath);
2507         } else if (rpath || rino) {
2508                 *ino = rino;
2509                 *ppath = rpath;
2510                 *pathlen = rpath ? strlen(rpath) : 0;
2511                 dout(" path %.*s\n", *pathlen, rpath);
2512         }
2513
2514         return r;
2515 }
2516
2517 /*
2518  * called under mdsc->mutex
2519  */
2520 static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
2521                                                struct ceph_mds_request *req,
2522                                                int mds, bool drop_cap_releases)
2523 {
2524         struct ceph_msg *msg;
2525         struct ceph_mds_request_head *head;
2526         const char *path1 = NULL;
2527         const char *path2 = NULL;
2528         u64 ino1 = 0, ino2 = 0;
2529         int pathlen1 = 0, pathlen2 = 0;
2530         bool freepath1 = false, freepath2 = false;
2531         int len;
2532         u16 releases;
2533         void *p, *end;
2534         int ret;
2535
2536         ret = set_request_path_attr(req->r_inode, req->r_dentry,
2537                               req->r_parent, req->r_path1, req->r_ino1.ino,
2538                               &path1, &pathlen1, &ino1, &freepath1,
2539                               test_bit(CEPH_MDS_R_PARENT_LOCKED,
2540                                         &req->r_req_flags));
2541         if (ret < 0) {
2542                 msg = ERR_PTR(ret);
2543                 goto out;
2544         }
2545
2546         /* If r_old_dentry is set, then assume that its parent is locked */
2547         ret = set_request_path_attr(NULL, req->r_old_dentry,
2548                               req->r_old_dentry_dir,
2549                               req->r_path2, req->r_ino2.ino,
2550                               &path2, &pathlen2, &ino2, &freepath2, true);
2551         if (ret < 0) {
2552                 msg = ERR_PTR(ret);
2553                 goto out_free1;
2554         }
2555
2556         len = sizeof(*head) +
2557                 pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) +
2558                 sizeof(struct ceph_timespec);
2559
2560         /* calculate (max) length for cap releases */
2561         len += sizeof(struct ceph_mds_request_release) *
2562                 (!!req->r_inode_drop + !!req->r_dentry_drop +
2563                  !!req->r_old_inode_drop + !!req->r_old_dentry_drop);
2564         if (req->r_dentry_drop)
2565                 len += pathlen1;
2566         if (req->r_old_dentry_drop)
2567                 len += pathlen2;
2568
2569         msg = ceph_msg_new2(CEPH_MSG_CLIENT_REQUEST, len, 1, GFP_NOFS, false);
2570         if (!msg) {
2571                 msg = ERR_PTR(-ENOMEM);
2572                 goto out_free2;
2573         }
2574
2575         msg->hdr.version = cpu_to_le16(2);
2576         msg->hdr.tid = cpu_to_le64(req->r_tid);
2577
2578         head = msg->front.iov_base;
2579         p = msg->front.iov_base + sizeof(*head);
2580         end = msg->front.iov_base + msg->front.iov_len;
2581
2582         head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
2583         head->op = cpu_to_le32(req->r_op);
2584         head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns, req->r_uid));
2585         head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns, req->r_gid));
2586         head->ino = cpu_to_le64(req->r_deleg_ino);
2587         head->args = req->r_args;
2588
2589         ceph_encode_filepath(&p, end, ino1, path1);
2590         ceph_encode_filepath(&p, end, ino2, path2);
2591
2592         /* make note of release offset, in case we need to replay */
2593         req->r_request_release_offset = p - msg->front.iov_base;
2594
2595         /* cap releases */
2596         releases = 0;
2597         if (req->r_inode_drop)
2598                 releases += ceph_encode_inode_release(&p,
2599                       req->r_inode ? req->r_inode : d_inode(req->r_dentry),
2600                       mds, req->r_inode_drop, req->r_inode_unless,
2601                       req->r_op == CEPH_MDS_OP_READDIR);
2602         if (req->r_dentry_drop)
2603                 releases += ceph_encode_dentry_release(&p, req->r_dentry,
2604                                 req->r_parent, mds, req->r_dentry_drop,
2605                                 req->r_dentry_unless);
2606         if (req->r_old_dentry_drop)
2607                 releases += ceph_encode_dentry_release(&p, req->r_old_dentry,
2608                                 req->r_old_dentry_dir, mds,
2609                                 req->r_old_dentry_drop,
2610                                 req->r_old_dentry_unless);
2611         if (req->r_old_inode_drop)
2612                 releases += ceph_encode_inode_release(&p,
2613                       d_inode(req->r_old_dentry),
2614                       mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);
2615
2616         if (drop_cap_releases) {
2617                 releases = 0;
2618                 p = msg->front.iov_base + req->r_request_release_offset;
2619         }
2620
2621         head->num_releases = cpu_to_le16(releases);
2622
2623         /* time stamp */
2624         {
2625                 struct ceph_timespec ts;
2626                 ceph_encode_timespec64(&ts, &req->r_stamp);
2627                 ceph_encode_copy(&p, &ts, sizeof(ts));
2628         }
2629
2630         if (WARN_ON_ONCE(p > end)) {
2631                 ceph_msg_put(msg);
2632                 msg = ERR_PTR(-ERANGE);
2633                 goto out_free2;
2634         }
2635
2636         msg->front.iov_len = p - msg->front.iov_base;
2637         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2638
2639         if (req->r_pagelist) {
2640                 struct ceph_pagelist *pagelist = req->r_pagelist;
2641                 ceph_msg_data_add_pagelist(msg, pagelist);
2642                 msg->hdr.data_len = cpu_to_le32(pagelist->length);
2643         } else {
2644                 msg->hdr.data_len = 0;
2645         }
2646
2647         msg->hdr.data_off = cpu_to_le16(0);
2648
2649 out_free2:
2650         if (freepath2)
2651                 ceph_mdsc_free_path((char *)path2, pathlen2);
2652 out_free1:
2653         if (freepath1)
2654                 ceph_mdsc_free_path((char *)path1, pathlen1);
2655 out:
2656         return msg;
2657 }
2658
2659 /*
2660  * called under mdsc->mutex if error, under no mutex if
2661  * success.
2662  */
2663 static void complete_request(struct ceph_mds_client *mdsc,
2664                              struct ceph_mds_request *req)
2665 {
2666         req->r_end_latency = ktime_get();
2667
2668         if (req->r_callback)
2669                 req->r_callback(mdsc, req);
2670         complete_all(&req->r_completion);
2671 }
2672
2673 /*
2674  * called under mdsc->mutex
2675  */
2676 static int __prepare_send_request(struct ceph_mds_client *mdsc,
2677                                   struct ceph_mds_request *req,
2678                                   int mds, bool drop_cap_releases)
2679 {
2680         struct ceph_mds_request_head *rhead;
2681         struct ceph_msg *msg;
2682         int flags = 0;
2683
2684         req->r_attempts++;
2685         if (req->r_inode) {
2686                 struct ceph_cap *cap =
2687                         ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds);
2688
2689                 if (cap)
2690                         req->r_sent_on_mseq = cap->mseq;
2691                 else
2692                         req->r_sent_on_mseq = -1;
2693         }
2694         dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req,
2695              req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);
2696
2697         if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
2698                 void *p;
2699                 /*
2700                  * Replay.  Do not regenerate message (and rebuild
2701                  * paths, etc.); just use the original message.
2702                  * Rebuilding paths will break for renames because
2703                  * d_move mangles the src name.
2704                  */
2705                 msg = req->r_request;
2706                 rhead = msg->front.iov_base;
2707
2708                 flags = le32_to_cpu(rhead->flags);
2709                 flags |= CEPH_MDS_FLAG_REPLAY;
2710                 rhead->flags = cpu_to_le32(flags);
2711
2712                 if (req->r_target_inode)
2713                         rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));
2714
2715                 rhead->num_retry = req->r_attempts - 1;
2716
2717                 /* remove cap/dentry releases from message */
2718                 rhead->num_releases = 0;
2719
2720                 /* time stamp */
2721                 p = msg->front.iov_base + req->r_request_release_offset;
2722                 {
2723                         struct ceph_timespec ts;
2724                         ceph_encode_timespec64(&ts, &req->r_stamp);
2725                         ceph_encode_copy(&p, &ts, sizeof(ts));
2726                 }
2727
2728                 msg->front.iov_len = p - msg->front.iov_base;
2729                 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2730                 return 0;
2731         }
2732
2733         if (req->r_request) {
2734                 ceph_msg_put(req->r_request);
2735                 req->r_request = NULL;
2736         }
2737         msg = create_request_message(mdsc, req, mds, drop_cap_releases);
2738         if (IS_ERR(msg)) {
2739                 req->r_err = PTR_ERR(msg);
2740                 return PTR_ERR(msg);
2741         }
2742         req->r_request = msg;
2743
2744         rhead = msg->front.iov_base;
2745         rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
2746         if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
2747                 flags |= CEPH_MDS_FLAG_REPLAY;
2748         if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags))
2749                 flags |= CEPH_MDS_FLAG_ASYNC;
2750         if (req->r_parent)
2751                 flags |= CEPH_MDS_FLAG_WANT_DENTRY;
2752         rhead->flags = cpu_to_le32(flags);
2753         rhead->num_fwd = req->r_num_fwd;
2754         rhead->num_retry = req->r_attempts - 1;
2755
2756         dout(" r_parent = %p\n", req->r_parent);
2757         return 0;
2758 }
2759
2760 /*
2761  * called under mdsc->mutex
2762  */
2763 static int __send_request(struct ceph_mds_client *mdsc,
2764                           struct ceph_mds_session *session,
2765                           struct ceph_mds_request *req,
2766                           bool drop_cap_releases)
2767 {
2768         int err;
2769
2770         err = __prepare_send_request(mdsc, req, session->s_mds,
2771                                      drop_cap_releases);
2772         if (!err) {
2773                 ceph_msg_get(req->r_request);
2774                 ceph_con_send(&session->s_con, req->r_request);
2775         }
2776
2777         return err;
2778 }
2779
2780 /*
2781  * send request, or put it on the appropriate wait list.
2782  */
2783 static void __do_request(struct ceph_mds_client *mdsc,
2784                         struct ceph_mds_request *req)
2785 {
2786         struct ceph_mds_session *session = NULL;
2787         int mds = -1;
2788         int err = 0;
2789         bool random;
2790
2791         if (req->r_err || test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
2792                 if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
2793                         __unregister_request(mdsc, req);
2794                 return;
2795         }
2796
2797         if (req->r_timeout &&
2798             time_after_eq(jiffies, req->r_started + req->r_timeout)) {
2799                 dout("do_request timed out\n");
2800                 err = -ETIMEDOUT;
2801                 goto finish;
2802         }
2803         if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
2804                 dout("do_request forced umount\n");
2805                 err = -EIO;
2806                 goto finish;
2807         }
2808         if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_MOUNTING) {
2809                 if (mdsc->mdsmap_err) {
2810                         err = mdsc->mdsmap_err;
2811                         dout("do_request mdsmap err %d\n", err);
2812                         goto finish;
2813                 }
2814                 if (mdsc->mdsmap->m_epoch == 0) {
2815                         dout("do_request no mdsmap, waiting for map\n");
2816                         list_add(&req->r_wait, &mdsc->waiting_for_map);
2817                         return;
2818                 }
2819                 if (!(mdsc->fsc->mount_options->flags &
2820                       CEPH_MOUNT_OPT_MOUNTWAIT) &&
2821                     !ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) {
2822                         err = -EHOSTUNREACH;
2823                         goto finish;
2824                 }
2825         }
2826
2827         put_request_session(req);
2828
2829         mds = __choose_mds(mdsc, req, &random);
2830         if (mds < 0 ||
2831             ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
2832                 if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags)) {
2833                         err = -EJUKEBOX;
2834                         goto finish;
2835                 }
2836                 dout("do_request no mds or not active, waiting for map\n");
2837                 list_add(&req->r_wait, &mdsc->waiting_for_map);
2838                 return;
2839         }
2840
2841         /* get, open session */
2842         session = __ceph_lookup_mds_session(mdsc, mds);
2843         if (!session) {
2844                 session = register_session(mdsc, mds);
2845                 if (IS_ERR(session)) {
2846                         err = PTR_ERR(session);
2847                         goto finish;
2848                 }
2849         }
2850         req->r_session = ceph_get_mds_session(session);
2851
2852         dout("do_request mds%d session %p state %s\n", mds, session,
2853              ceph_session_state_name(session->s_state));
2854         if (session->s_state != CEPH_MDS_SESSION_OPEN &&
2855             session->s_state != CEPH_MDS_SESSION_HUNG) {
2856                 if (session->s_state == CEPH_MDS_SESSION_REJECTED) {
2857                         err = -EACCES;
2858                         goto out_session;
2859                 }
2860                 /*
2861                  * We cannot queue async requests since the caps and delegated
2862                  * inodes are bound to the session. Just return -EJUKEBOX and
2863                  * let the caller retry a sync request in that case.
2864                  */
2865                 if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags)) {
2866                         err = -EJUKEBOX;
2867                         goto out_session;
2868                 }
2869                 if (session->s_state == CEPH_MDS_SESSION_NEW ||
2870                     session->s_state == CEPH_MDS_SESSION_CLOSING) {
2871                         err = __open_session(mdsc, session);
2872                         if (err)
2873                                 goto out_session;
2874                         /* retry the same mds later */
2875                         if (random)
2876                                 req->r_resend_mds = mds;
2877                 }
2878                 list_add(&req->r_wait, &session->s_waiting);
2879                 goto out_session;
2880         }
2881
2882         /* send request */
2883         req->r_resend_mds = -1;   /* forget any previous mds hint */
2884
2885         if (req->r_request_started == 0)   /* note request start time */
2886                 req->r_request_started = jiffies;
2887
2888         err = __send_request(mdsc, session, req, false);
2889
2890 out_session:
2891         ceph_put_mds_session(session);
2892 finish:
2893         if (err) {
2894                 dout("__do_request early error %d\n", err);
2895                 req->r_err = err;
2896                 complete_request(mdsc, req);
2897                 __unregister_request(mdsc, req);
2898         }
2899         return;
2900 }
2901
2902 /*
2903  * called under mdsc->mutex
2904  */
2905 static void __wake_requests(struct ceph_mds_client *mdsc,
2906                             struct list_head *head)
2907 {
2908         struct ceph_mds_request *req;
2909         LIST_HEAD(tmp_list);
2910
2911         list_splice_init(head, &tmp_list);
2912
2913         while (!list_empty(&tmp_list)) {
2914                 req = list_entry(tmp_list.next,
2915                                  struct ceph_mds_request, r_wait);
2916                 list_del_init(&req->r_wait);
2917                 dout(" wake request %p tid %llu\n", req, req->r_tid);
2918                 __do_request(mdsc, req);
2919         }
2920 }
2921
2922 /*
2923  * Wake up threads with requests pending for @mds, so that they can
2924  * resubmit their requests to a possibly different mds.
2925  */
2926 static void kick_requests(struct ceph_mds_client *mdsc, int mds)
2927 {
2928         struct ceph_mds_request *req;
2929         struct rb_node *p = rb_first(&mdsc->request_tree);
2930
2931         dout("kick_requests mds%d\n", mds);
2932         while (p) {
2933                 req = rb_entry(p, struct ceph_mds_request, r_node);
2934                 p = rb_next(p);
2935                 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
2936                         continue;
2937                 if (req->r_attempts > 0)
2938                         continue; /* only new requests */
2939                 if (req->r_session &&
2940                     req->r_session->s_mds == mds) {
2941                         dout(" kicking tid %llu\n", req->r_tid);
2942                         list_del_init(&req->r_wait);
2943                         __do_request(mdsc, req);
2944                 }
2945         }
2946 }
2947
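/*
 * Take CAP_PIN references for the request's inodes, wait for any pending
 * async creates they depend on, then register the request and submit it
 * to a suitable MDS.  Returns 0 or an early error.
 */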
2948 int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir,
2949                               struct ceph_mds_request *req)
2950 {
2951         int err = 0;
2952
2953         /* take CAP_PIN refs for r_inode, r_parent, r_old_dentry */
2954         if (req->r_inode)
2955                 ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
2956         if (req->r_parent) {
2957                 struct ceph_inode_info *ci = ceph_inode(req->r_parent);
2958                 int fmode = (req->r_op & CEPH_MDS_OP_WRITE) ?
2959                             CEPH_FILE_MODE_WR : CEPH_FILE_MODE_RD;
2960                 spin_lock(&ci->i_ceph_lock);
2961                 ceph_take_cap_refs(ci, CEPH_CAP_PIN, false);
2962                 __ceph_touch_fmode(ci, mdsc, fmode);
2963                 spin_unlock(&ci->i_ceph_lock);
2964                 ihold(req->r_parent);
2965         }
2966         if (req->r_old_dentry_dir)
2967                 ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
2968                                   CEPH_CAP_PIN);
2969
2970         if (req->r_inode) {
2971                 err = ceph_wait_on_async_create(req->r_inode);
2972                 if (err) {
2973                         dout("%s: wait for async create returned: %d\n",
2974                              __func__, err);
2975                         return err;
2976                 }
2977         }
2978
2979         if (!err && req->r_old_inode) {
2980                 err = ceph_wait_on_async_create(req->r_old_inode);
2981                 if (err) {
2982                         dout("%s: wait for async create returned: %d\n",
2983                              __func__, err);
2984                         return err;
2985                 }
2986         }
2987
2988         dout("submit_request on %p for inode %p\n", req, dir);
2989         mutex_lock(&mdsc->mutex);
2990         __register_request(mdsc, req, dir);
2991         __do_request(mdsc, req);
2992         err = req->r_err;
2993         mutex_unlock(&mdsc->mutex);
2994         return err;
2995 }
2996
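/*
 * Wait for a submitted request to complete.  If the wait is interrupted
 * or times out before a real reply arrives, mark the request aborted and
 * return the error; otherwise return the MDS result.
 */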
2997 static int ceph_mdsc_wait_request(struct ceph_mds_client *mdsc,
2998                                   struct ceph_mds_request *req)
2999 {
3000         int err;
3001
3002         /* wait */
3003         dout("do_request waiting\n");
3004         if (!req->r_timeout && req->r_wait_for_completion) {
3005                 err = req->r_wait_for_completion(mdsc, req);
3006         } else {
3007                 long timeleft = wait_for_completion_killable_timeout(
3008                                         &req->r_completion,
3009                                         ceph_timeout_jiffies(req->r_timeout));
3010                 if (timeleft > 0)
3011                         err = 0;
3012                 else if (!timeleft)
3013                         err = -ETIMEDOUT;  /* timed out */
3014                 else
3015                         err = timeleft;  /* killed */
3016         }
3017         dout("do_request waited, got %d\n", err);
3018         mutex_lock(&mdsc->mutex);
3019
3020         /* only abort if we didn't race with a real reply */
3021         if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
3022                 err = le32_to_cpu(req->r_reply_info.head->result);
3023         } else if (err < 0) {
3024                 dout("aborted request %lld with %d\n", req->r_tid, err);
3025
3026                 /*
3027                  * ensure we aren't running concurrently with
3028                  * ceph_fill_trace or ceph_readdir_prepopulate, which
3029                  * rely on locks (dir mutex) held by our caller.
3030                  */
3031                 mutex_lock(&req->r_fill_mutex);
3032                 req->r_err = err;
3033                 set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
3034                 mutex_unlock(&req->r_fill_mutex);
3035
3036                 if (req->r_parent &&
3037                     (req->r_op & CEPH_MDS_OP_WRITE))
3038                         ceph_invalidate_dir_request(req);
3039         } else {
3040                 err = req->r_err;
3041         }
3042
3043         mutex_unlock(&mdsc->mutex);
3044         return err;
3045 }
3046
3047 /*
3048  * Synchronously perform an mds request.  Take care of all of the
3049  * session setup, forwarding, retry details.
3050  */
3051 int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
3052                          struct inode *dir,
3053                          struct ceph_mds_request *req)
3054 {
3055         int err;
3056
3057         dout("do_request on %p\n", req);
3058
3059         /* issue */
3060         err = ceph_mdsc_submit_request(mdsc, dir, req);
3061         if (!err)
3062                 err = ceph_mdsc_wait_request(mdsc, req);
3063         dout("do_request %p done, result %d\n", req, err);
3064         return err;
3065 }
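
/*
 * Illustrative caller sketch (not taken from any one call site; the op
 * and fields below are examples -- see fs/ceph/dir.c and friends for
 * real users):
 *
 *	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUP, USE_AUTH_MDS);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	req->r_dentry = dget(dentry);
 *	req->r_num_caps = 2;
 *	err = ceph_mdsc_do_request(mdsc, dir, req);
 *	ceph_mdsc_put_request(req);
 */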
3066
3067 /*
3068  * Invalidate dir's completeness and dentry lease state on an aborted MDS
3069  * namespace request.
3070  */
3071 void ceph_invalidate_dir_request(struct ceph_mds_request *req)
3072 {
3073         struct inode *dir = req->r_parent;
3074         struct inode *old_dir = req->r_old_dentry_dir;
3075
3076         dout("invalidate_dir_request %p %p (complete, lease(s))\n", dir, old_dir);
3077
3078         ceph_dir_clear_complete(dir);
3079         if (old_dir)
3080                 ceph_dir_clear_complete(old_dir);
3081         if (req->r_dentry)
3082                 ceph_invalidate_dentry_lease(req->r_dentry);
3083         if (req->r_old_dentry)
3084                 ceph_invalidate_dentry_lease(req->r_old_dentry);
3085 }
3086
3087 /*
3088  * Handle mds reply.
3089  *
3090  * We take the session mutex and parse and process the reply immediately.
3091  * This preserves the logical ordering of replies, capabilities, etc., sent
3092  * by the MDS as they are applied to our local cache.
3093  */
3094 static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
3095 {
3096         struct ceph_mds_client *mdsc = session->s_mdsc;
3097         struct ceph_mds_request *req;
3098         struct ceph_mds_reply_head *head = msg->front.iov_base;
3099         struct ceph_mds_reply_info_parsed *rinfo;  /* parsed reply info */
3100         struct ceph_snap_realm *realm;
3101         u64 tid;
3102         int err, result;
3103         int mds = session->s_mds;
3104
3105         if (msg->front.iov_len < sizeof(*head)) {
3106                 pr_err("mdsc_handle_reply got corrupt (short) reply\n");
3107                 ceph_msg_dump(msg);
3108                 return;
3109         }
3110
3111         /* get request, session */
3112         tid = le64_to_cpu(msg->hdr.tid);
3113         mutex_lock(&mdsc->mutex);
3114         req = lookup_get_request(mdsc, tid);
3115         if (!req) {
3116                 dout("handle_reply on unknown tid %llu\n", tid);
3117                 mutex_unlock(&mdsc->mutex);
3118                 return;
3119         }
3120         dout("handle_reply %p\n", req);
3121
3122         /* correct session? */
3123         if (req->r_session != session) {
3124                 pr_err("mdsc_handle_reply got %llu on session mds%d"
3125                        " not mds%d\n", tid, session->s_mds,
3126                        req->r_session ? req->r_session->s_mds : -1);
3127                 mutex_unlock(&mdsc->mutex);
3128                 goto out;
3129         }
3130
3131         /* dup? */
3132         if ((test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags) && !head->safe) ||
3133             (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags) && head->safe)) {
3134                 pr_warn("got a dup %s reply on %llu from mds%d\n",
3135                            head->safe ? "safe" : "unsafe", tid, mds);
3136                 mutex_unlock(&mdsc->mutex);
3137                 goto out;
3138         }
3139         if (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags)) {
3140                 pr_warn("got unsafe after safe on %llu from mds%d\n",
3141                            tid, mds);
3142                 mutex_unlock(&mdsc->mutex);
3143                 goto out;
3144         }
3145
3146         result = le32_to_cpu(head->result);
3147
3148         /*
3149          * Handle an ESTALE:
3150          * if we're not talking to the authority, resend to it;
3151          * if the authority has changed while we weren't looking,
3152          * resend to the new authority;
3153          * otherwise we just have to return the ESTALE.
3154          */
3155         if (result == -ESTALE) {
3156                 dout("got ESTALE on request %llu\n", req->r_tid);
3157                 req->r_resend_mds = -1;
3158                 if (req->r_direct_mode != USE_AUTH_MDS) {
3159                         dout("not using auth, setting for that now\n");
3160                         req->r_direct_mode = USE_AUTH_MDS;
3161                         __do_request(mdsc, req);
3162                         mutex_unlock(&mdsc->mutex);
3163                         goto out;
3164                 } else  {
3165                         int mds = __choose_mds(mdsc, req, NULL);
3166                         if (mds >= 0 && mds != req->r_session->s_mds) {
3167                                 dout("but auth changed, so resending\n");
3168                                 __do_request(mdsc, req);
3169                                 mutex_unlock(&mdsc->mutex);
3170                                 goto out;
3171                         }
3172                 }
3173                 dout("have to return ESTALE on request %llu\n", req->r_tid);
3174         }
3175
3176
3177         if (head->safe) {
3178                 set_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags);
3179                 __unregister_request(mdsc, req);
3180
3181                 /* last request during umount? */
3182                 if (mdsc->stopping && !__get_oldest_req(mdsc))
3183                         complete_all(&mdsc->safe_umount_waiters);
3184
3185                 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
3186                         /*
3187                          * We already handled the unsafe response, now do the
3188                          * cleanup.  No need to examine the response; the MDS
3189                          * doesn't include any result info in the safe
3190                          * response.  And even if it did, there is nothing
3191                          * useful we could do with a revised return value.
3192                          */
3193                         dout("got safe reply %llu, mds%d\n", tid, mds);
3194
3195                         mutex_unlock(&mdsc->mutex);
3196                         goto out;
3197                 }
3198         } else {
3199                 set_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags);
3200                 list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
3201         }
3202
3203         dout("handle_reply tid %lld result %d\n", tid, result);
3204         rinfo = &req->r_reply_info;
3205         if (test_bit(CEPHFS_FEATURE_REPLY_ENCODING, &session->s_features))
3206                 err = parse_reply_info(session, msg, rinfo, (u64)-1);
3207         else
3208                 err = parse_reply_info(session, msg, rinfo, session->s_con.peer_features);
3209         mutex_unlock(&mdsc->mutex);
3210
3211         mutex_lock(&session->s_mutex);
3212         if (err < 0) {
3213                 pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds, tid);
3214                 ceph_msg_dump(msg);
3215                 goto out_err;
3216         }
3217
3218         /* snap trace */
3219         realm = NULL;
3220         if (rinfo->snapblob_len) {
3221                 down_write(&mdsc->snap_rwsem);
3222                 ceph_update_snap_trace(mdsc, rinfo->snapblob,
3223                                 rinfo->snapblob + rinfo->snapblob_len,
3224                                 le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP,
3225                                 &realm);
3226                 downgrade_write(&mdsc->snap_rwsem);
3227         } else {
3228                 down_read(&mdsc->snap_rwsem);
3229         }
3230
3231         /* insert trace into our cache */
3232         mutex_lock(&req->r_fill_mutex);
3233         current->journal_info = req;
3234         err = ceph_fill_trace(mdsc->fsc->sb, req);
3235         if (err == 0) {
3236                 if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
3237                                     req->r_op == CEPH_MDS_OP_LSSNAP))
3238                         ceph_readdir_prepopulate(req, req->r_session);
3239         }
3240         current->journal_info = NULL;
3241         mutex_unlock(&req->r_fill_mutex);
3242
3243         up_read(&mdsc->snap_rwsem);
3244         if (realm)
3245                 ceph_put_snap_realm(mdsc, realm);
3246
3247         if (err == 0) {
3248                 if (req->r_target_inode &&
3249                     test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
3250                         struct ceph_inode_info *ci =
3251                                 ceph_inode(req->r_target_inode);
3252                         spin_lock(&ci->i_unsafe_lock);
3253                         list_add_tail(&req->r_unsafe_target_item,
3254                                       &ci->i_unsafe_iops);
3255                         spin_unlock(&ci->i_unsafe_lock);
3256                 }
3257
3258                 ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
3259         }
3260 out_err:
3261         mutex_lock(&mdsc->mutex);
3262         if (!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
3263                 if (err) {
3264                         req->r_err = err;
3265                 } else {
3266                         req->r_reply =  ceph_msg_get(msg);
3267                         set_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags);
3268                 }
3269         } else {
3270                 dout("reply arrived after request %lld was aborted\n", tid);
3271         }
3272         mutex_unlock(&mdsc->mutex);
3273
3274         mutex_unlock(&session->s_mutex);
3275
3276         /* kick calling process */
3277         complete_request(mdsc, req);
3278
3279         ceph_update_metadata_latency(&mdsc->metric, req->r_start_latency,
3280                                      req->r_end_latency, err);
3281 out:
3282         ceph_mdsc_put_request(req);
3283         return;
3284 }
3285
3286
3287
3288 /*
3289  * handle mds notification that our request has been forwarded.
3290  */
3291 static void handle_forward(struct ceph_mds_client *mdsc,
3292                            struct ceph_mds_session *session,
3293                            struct ceph_msg *msg)
3294 {
3295         struct ceph_mds_request *req;
3296         u64 tid = le64_to_cpu(msg->hdr.tid);
3297         u32 next_mds;
3298         u32 fwd_seq;
3299         int err = -EINVAL;
3300         void *p = msg->front.iov_base;
3301         void *end = p + msg->front.iov_len;
3302
3303         ceph_decode_need(&p, end, 2*sizeof(u32), bad);
3304         next_mds = ceph_decode_32(&p);
3305         fwd_seq = ceph_decode_32(&p);
3306
3307         mutex_lock(&mdsc->mutex);
3308         req = lookup_get_request(mdsc, tid);
3309         if (!req) {
3310                 dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
3311                 goto out;  /* dup reply? */
3312         }
3313
3314         if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
3315                 dout("forward tid %llu aborted, unregistering\n", tid);
3316                 __unregister_request(mdsc, req);
3317         } else if (fwd_seq <= req->r_num_fwd) {
3318                 dout("forward tid %llu to mds%d - old seq %d <= %d\n",
3319                      tid, next_mds, req->r_num_fwd, fwd_seq);
3320         } else {
3321                 /* resend. forward race not possible; mds would drop */
3322                 dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
3323                 BUG_ON(req->r_err);
3324                 BUG_ON(test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags));
3325                 req->r_attempts = 0;
3326                 req->r_num_fwd = fwd_seq;
3327                 req->r_resend_mds = next_mds;
3328                 put_request_session(req);
3329                 __do_request(mdsc, req);
3330         }
3331         ceph_mdsc_put_request(req);
3332 out:
3333         mutex_unlock(&mdsc->mutex);
3334         return;
3335
3336 bad:
3337         pr_err("mdsc_handle_forward decode error err=%d\n", err);
3338 }
3339
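/*
 * Decode the metadata map from a session message and note whether the
 * MDS reported this client as blocklisted in its error_string.
 */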
3340 static int __decode_session_metadata(void **p, void *end,
3341                                      bool *blocklisted)
3342 {
3343         /* map<string,string> */
3344         u32 n;
3345         bool err_str;
3346         ceph_decode_32_safe(p, end, n, bad);
3347         while (n-- > 0) {
3348                 u32 len;
3349                 ceph_decode_32_safe(p, end, len, bad);
3350                 ceph_decode_need(p, end, len, bad);
3351                 err_str = !strncmp(*p, "error_string", len);
3352                 *p += len;
3353                 ceph_decode_32_safe(p, end, len, bad);
3354                 ceph_decode_need(p, end, len, bad);
3355                 /*
3356                  * Match "blocklisted (blacklisted)" from newer MDSes,
3357                  * or "blacklisted" from older MDSes.
3358                  */
3359                 if (err_str && strnstr(*p, "blacklisted", len))
3360                         *blocklisted = true;
3361                 *p += len;
3362         }
3363         return 0;
3364 bad:
3365         return -1;
3366 }
3367
3368 /*
3369  * handle a mds session control message
3370  */
3371 static void handle_session(struct ceph_mds_session *session,
3372                            struct ceph_msg *msg)
3373 {
3374         struct ceph_mds_client *mdsc = session->s_mdsc;
3375         int mds = session->s_mds;
3376         int msg_version = le16_to_cpu(msg->hdr.version);
3377         void *p = msg->front.iov_base;
3378         void *end = p + msg->front.iov_len;
3379         struct ceph_mds_session_head *h;
3380         u32 op;
3381         u64 seq, features = 0;
3382         int wake = 0;
3383         bool blocklisted = false;
3384
3385         /* decode */
3386         ceph_decode_need(&p, end, sizeof(*h), bad);
3387         h = p;
3388         p += sizeof(*h);
3389
3390         op = le32_to_cpu(h->op);
3391         seq = le64_to_cpu(h->seq);
3392
3393         if (msg_version >= 3) {
3394                 u32 len;
3395                 /* version >= 2, metadata */
3396                 if (__decode_session_metadata(&p, end, &blocklisted) < 0)
3397                         goto bad;
3398                 /* version >= 3, feature bits */
3399                 ceph_decode_32_safe(&p, end, len, bad);
3400                 if (len) {
3401                         ceph_decode_64_safe(&p, end, features, bad);
3402                         p += len - sizeof(features);
3403                 }
3404         }
3405
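        /*
         * A CLOSE unregisters the session; grab an extra reference so it
         * stays around for the rest of this function.
         */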
3406         mutex_lock(&mdsc->mutex);
3407         if (op == CEPH_SESSION_CLOSE) {
3408                 ceph_get_mds_session(session);
3409                 __unregister_session(mdsc, session);
3410         }
3411         /* FIXME: this ttl calculation is generous */
3412         session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose;
3413         mutex_unlock(&mdsc->mutex);
3414
3415         mutex_lock(&session->s_mutex);
3416
3417         dout("handle_session mds%d %s %p state %s seq %llu\n",
3418              mds, ceph_session_op_name(op), session,
3419              ceph_session_state_name(session->s_state), seq);
3420
3421         if (session->s_state == CEPH_MDS_SESSION_HUNG) {
3422                 session->s_state = CEPH_MDS_SESSION_OPEN;
3423                 pr_info("mds%d came back\n", session->s_mds);
3424         }
3425
3426         switch (op) {
3427         case CEPH_SESSION_OPEN:
3428                 if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
3429                         pr_info("mds%d reconnect success\n", session->s_mds);
3430                 session->s_state = CEPH_MDS_SESSION_OPEN;
3431                 session->s_features = features;
3432                 renewed_caps(mdsc, session, 0);
3433                 if (test_bit(CEPHFS_FEATURE_METRIC_COLLECT, &session->s_features))
3434                         metric_schedule_delayed(&mdsc->metric);
3435                 wake = 1;
3436                 if (mdsc->stopping)
3437                         __close_session(mdsc, session);
3438                 break;
3439
3440         case CEPH_SESSION_RENEWCAPS:
3441                 if (session->s_renew_seq == seq)
3442                         renewed_caps(mdsc, session, 1);
3443                 break;
3444
3445         case CEPH_SESSION_CLOSE:
3446                 if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
3447                         pr_info("mds%d reconnect denied\n", session->s_mds);
3448                 session->s_state = CEPH_MDS_SESSION_CLOSED;
3449                 cleanup_session_requests(mdsc, session);
3450                 remove_session_caps(session);
3451                 wake = 2; /* for good measure */
3452                 wake_up_all(&mdsc->session_close_wq);
3453                 break;
3454
3455         case CEPH_SESSION_STALE:
3456                 pr_info("mds%d caps went stale, renewing\n",
3457                         session->s_mds);
3458                 spin_lock(&session->s_gen_ttl_lock);
3459                 session->s_cap_gen++;
3460                 session->s_cap_ttl = jiffies - 1;
3461                 spin_unlock(&session->s_gen_ttl_lock);
3462                 send_renew_caps(mdsc, session);
3463                 break;
3464
3465         case CEPH_SESSION_RECALL_STATE:
3466                 ceph_trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
3467                 break;
3468
3469         case CEPH_SESSION_FLUSHMSG:
3470                 send_flushmsg_ack(mdsc, session, seq);
3471                 break;
3472
3473         case CEPH_SESSION_FORCE_RO:
3474                 dout("force_session_readonly %p\n", session);
3475                 spin_lock(&session->s_cap_lock);
3476                 session->s_readonly = true;
3477                 spin_unlock(&session->s_cap_lock);
3478                 wake_up_session_caps(session, FORCE_RO);
3479                 break;
3480
3481         case CEPH_SESSION_REJECT:
3482                 WARN_ON(session->s_state != CEPH_MDS_SESSION_OPENING);
3483                 pr_info("mds%d rejected session\n", session->s_mds);
3484                 session->s_state = CEPH_MDS_SESSION_REJECTED;
3485                 cleanup_session_requests(mdsc, session);
3486                 remove_session_caps(session);
3487                 if (blocklisted)
3488                         mdsc->fsc->blocklisted = true;
3489                 wake = 2; /* for good measure */
3490                 break;
3491
3492         default:
3493                 pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);
3494                 WARN_ON(1);
3495         }
3496
3497         mutex_unlock(&session->s_mutex);
3498         if (wake) {
3499                 mutex_lock(&mdsc->mutex);
3500                 __wake_requests(mdsc, &session->s_waiting);
3501                 if (wake == 2)
3502                         kick_requests(mdsc, mds);
3503                 mutex_unlock(&mdsc->mutex);
3504         }
3505         if (op == CEPH_SESSION_CLOSE)
3506                 ceph_put_mds_session(session);
3507         return;
3508
3509 bad:
3510         pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds,
3511                (int)msg->front.iov_len);
3512         ceph_msg_dump(msg);
3513         return;
3514 }
3515
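/*
 * Drop the directory cap references (r_dir_caps) that were taken on
 * r_parent for this request.
 */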
3516 void ceph_mdsc_release_dir_caps(struct ceph_mds_request *req)
3517 {
3518         int dcaps;
3519
3520         dcaps = xchg(&req->r_dir_caps, 0);
3521         if (dcaps) {
3522                 dout("releasing r_dir_caps=%s\n", ceph_cap_string(dcaps));
3523                 ceph_put_cap_refs(ceph_inode(req->r_parent), dcaps);
3524         }
3525 }
3526
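/*
 * As above, but skip the check_caps call on the parent directory when
 * dropping the references.
 */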
3527 void ceph_mdsc_release_dir_caps_no_check(struct ceph_mds_request *req)
3528 {
3529         int dcaps;
3530
3531         dcaps = xchg(&req->r_dir_caps, 0);
3532         if (dcaps) {
3533                 dout("releasing r_dir_caps=%s\n", ceph_cap_string(dcaps));
3534                 ceph_put_cap_refs_no_check_caps(ceph_inode(req->r_parent),
3535                                                 dcaps);
3536         }
3537 }
3538
3539 /*
3540  * called under session->s_mutex.
3541  */
3542 static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
3543                                    struct ceph_mds_session *session)
3544 {
3545         struct ceph_mds_request *req, *nreq;
3546         struct rb_node *p;
3547
3548         dout("replay_unsafe_requests mds%d\n", session->s_mds);
3549
3550         mutex_lock(&mdsc->mutex);
3551         list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item)
3552                 __send_request(mdsc, session, req, true);
3553
3554         /*
3555          * Also re-send old requests when the MDS enters the reconnect stage,
3556          * so that it can process completed requests in the clientreplay stage.
3557          */
3558         p = rb_first(&mdsc->request_tree);
3559         while (p) {
3560                 req = rb_entry(p, struct ceph_mds_request, r_node);
3561                 p = rb_next(p);
3562                 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
3563                         continue;
3564                 if (req->r_attempts == 0)
3565                         continue; /* only old requests */
3566                 if (!req->r_session)
3567                         continue;
3568                 if (req->r_session->s_mds != session->s_mds)
3569                         continue;
3570
3571                 ceph_mdsc_release_dir_caps_no_check(req);
3572
3573                 __send_request(mdsc, session, req, true);
3574         }
3575         mutex_unlock(&mdsc->mutex);
3576 }
3577
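/*
 * The reconnect payload has grown past RECONNECT_MAX_SIZE: flush what has
 * been encoded so far as a partial (v5) reconnect message and start a
 * fresh pagelist for the remainder.  Only possible when the MDS supports
 * multiple reconnect messages.
 */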
3578 static int send_reconnect_partial(struct ceph_reconnect_state *recon_state)
3579 {
3580         struct ceph_msg *reply;
3581         struct ceph_pagelist *_pagelist;
3582         struct page *page;
3583         __le32 *addr;
3584         int err = -ENOMEM;
3585
3586         if (!recon_state->allow_multi)
3587                 return -ENOSPC;
3588
3589         /* can't handle a message that contains both caps and realms */
3590         BUG_ON(!recon_state->nr_caps == !recon_state->nr_realms);
3591
3592         /* pre-allocate new pagelist */
3593         _pagelist = ceph_pagelist_alloc(GFP_NOFS);
3594         if (!_pagelist)
3595                 return -ENOMEM;
3596
3597         reply = ceph_msg_new2(CEPH_MSG_CLIENT_RECONNECT, 0, 1, GFP_NOFS, false);
3598         if (!reply)
3599                 goto fail_msg;
3600
3601         /* placeholder for nr_caps */
3602         err = ceph_pagelist_encode_32(_pagelist, 0);
3603         if (err < 0)
3604                 goto fail;
3605
3606         if (recon_state->nr_caps) {
3607                 /* currently encoding caps */
3608                 err = ceph_pagelist_encode_32(recon_state->pagelist, 0);
3609                 if (err)
3610                         goto fail;
3611         } else {
3612                 /* placeholder for nr_realms (currently encoding realms) */
3613                 err = ceph_pagelist_encode_32(_pagelist, 0);
3614                 if (err < 0)
3615                         goto fail;
3616         }
3617
3618         err = ceph_pagelist_encode_8(recon_state->pagelist, 1);
3619         if (err)
3620                 goto fail;
3621
3622         page = list_first_entry(&recon_state->pagelist->head, struct page, lru);
3623         addr = kmap_atomic(page);
3624         if (recon_state->nr_caps) {
3625                 /* currently encoding caps */
3626                 *addr = cpu_to_le32(recon_state->nr_caps);
3627         } else {
3628                 /* currently encoding realms */
3629                 *(addr + 1) = cpu_to_le32(recon_state->nr_realms);
3630         }
3631         kunmap_atomic(addr);
3632
3633         reply->hdr.version = cpu_to_le16(5);
3634         reply->hdr.compat_version = cpu_to_le16(4);
3635
3636         reply->hdr.data_len = cpu_to_le32(recon_state->pagelist->length);
3637         ceph_msg_data_add_pagelist(reply, recon_state->pagelist);
3638
3639         ceph_con_send(&recon_state->session->s_con, reply);
3640         ceph_pagelist_release(recon_state->pagelist);
3641
3642         recon_state->pagelist = _pagelist;
3643         recon_state->nr_caps = 0;
3644         recon_state->nr_realms = 0;
3645         recon_state->msg_version = 5;
3646         return 0;
3647 fail:
3648         ceph_msg_put(reply);
3649 fail_msg:
3650         ceph_pagelist_release(_pagelist);
3651         return err;
3652 }
3653
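/*
 * Find the "primary" dentry for an inode: for a directory, its (non-root)
 * alias; for other inodes, a hashed alias flagged CEPH_DENTRY_PRIMARY_LINK.
 * Returns a referenced dentry, or NULL if there is none.
 */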
3654 static struct dentry* d_find_primary(struct inode *inode)
3655 {
3656         struct dentry *alias, *dn = NULL;
3657
3658         if (hlist_empty(&inode->i_dentry))
3659                 return NULL;
3660
3661         spin_lock(&inode->i_lock);
3662         if (hlist_empty(&inode->i_dentry))
3663                 goto out_unlock;
3664
3665         if (S_ISDIR(inode->i_mode)) {
3666                 alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias);
3667                 if (!IS_ROOT(alias))
3668                         dn = dget(alias);
3669                 goto out_unlock;
3670         }
3671
3672         hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
3673                 spin_lock(&alias->d_lock);
3674                 if (!d_unhashed(alias) &&
3675                     (ceph_dentry(alias)->flags & CEPH_DENTRY_PRIMARY_LINK)) {
3676                         dn = dget_dlock(alias);
3677                 }
3678                 spin_unlock(&alias->d_lock);
3679                 if (dn)
3680                         break;
3681         }
3682 out_unlock:
3683         spin_unlock(&inode->i_lock);
3684         return dn;
3685 }
3686
3687 /*
3688  * Encode information about a cap for a reconnect with the MDS.
3689  */
3690 static int reconnect_caps_cb(struct inode *inode, struct ceph_cap *cap,
3691                           void *arg)
3692 {
3693         union {
3694                 struct ceph_mds_cap_reconnect v2;
3695                 struct ceph_mds_cap_reconnect_v1 v1;
3696         } rec;
3697         struct ceph_inode_info *ci = cap->ci;
3698         struct ceph_reconnect_state *recon_state = arg;
3699         struct ceph_pagelist *pagelist = recon_state->pagelist;
3700         struct dentry *dentry;
3701         char *path;
3702         int pathlen = 0, err;
3703         u64 pathbase;
3704         u64 snap_follows;
3705
3706         dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
3707              inode, ceph_vinop(inode), cap, cap->cap_id,
3708              ceph_cap_string(cap->issued));
3709
3710         dentry = d_find_primary(inode);
3711         if (dentry) {
3712                 /* set pathbase to parent dir when msg_version >= 2 */
3713                 path = ceph_mdsc_build_path(dentry, &pathlen, &pathbase,
3714                                             recon_state->msg_version >= 2);
3715                 dput(dentry);
3716                 if (IS_ERR(path)) {
3717                         err = PTR_ERR(path);
3718                         goto out_err;
3719                 }
3720         } else {
3721                 path = NULL;
3722                 pathbase = 0;
3723         }
3724
3725         spin_lock(&ci->i_ceph_lock);
3726         cap->seq = 0;        /* reset cap seq */
3727         cap->issue_seq = 0;  /* and issue_seq */
3728         cap->mseq = 0;       /* and migrate_seq */
3729         cap->cap_gen = cap->session->s_cap_gen;
3730
3731         /* These are lost when the session goes away */
3732         if (S_ISDIR(inode->i_mode)) {
3733                 if (cap->issued & CEPH_CAP_DIR_CREATE) {
3734                         ceph_put_string(rcu_dereference_raw(ci->i_cached_layout.pool_ns));
3735                         memset(&ci->i_cached_layout, 0, sizeof(ci->i_cached_layout));
3736                 }
3737                 cap->issued &= ~CEPH_CAP_ANY_DIR_OPS;
3738         }
3739
3740         if (recon_state->msg_version >= 2) {
3741                 rec.v2.cap_id = cpu_to_le64(cap->cap_id);
3742                 rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
3743                 rec.v2.issued = cpu_to_le32(cap->issued);
3744                 rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
3745                 rec.v2.pathbase = cpu_to_le64(pathbase);
3746                 rec.v2.flock_len = (__force __le32)
3747                         ((ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) ? 0 : 1);
3748         } else {
3749                 rec.v1.cap_id = cpu_to_le64(cap->cap_id);
3750                 rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
3751                 rec.v1.issued = cpu_to_le32(cap->issued);
3752                 rec.v1.size = cpu_to_le64(inode->i_size);
3753                 ceph_encode_timespec64(&rec.v1.mtime, &inode->i_mtime);
3754                 ceph_encode_timespec64(&rec.v1.atime, &inode->i_atime);
3755                 rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
3756                 rec.v1.pathbase = cpu_to_le64(pathbase);
3757         }
3758
3759         if (list_empty(&ci->i_cap_snaps)) {
3760                 snap_follows = ci->i_head_snapc ? ci->i_head_snapc->seq : 0;
3761         } else {
3762                 struct ceph_cap_snap *capsnap =
3763                         list_first_entry(&ci->i_cap_snaps,
3764                                          struct ceph_cap_snap, ci_item);
3765                 snap_follows = capsnap->follows;
3766         }
3767         spin_unlock(&ci->i_ceph_lock);
3768
3769         if (recon_state->msg_version >= 2) {
3770                 int num_fcntl_locks, num_flock_locks;
3771                 struct ceph_filelock *flocks = NULL;
3772                 size_t struct_len, total_len = sizeof(u64);
3773                 u8 struct_v = 0;
3774
3775 encode_again:
3776                 if (rec.v2.flock_len) {
3777                         ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
3778                 } else {
3779                         num_fcntl_locks = 0;
3780                         num_flock_locks = 0;
3781                 }
3782                 if (num_fcntl_locks + num_flock_locks > 0) {
3783                         flocks = kmalloc_array(num_fcntl_locks + num_flock_locks,
3784                                                sizeof(struct ceph_filelock),
3785                                                GFP_NOFS);
3786                         if (!flocks) {
3787                                 err = -ENOMEM;
3788                                 goto out_err;
3789                         }
3790                         err = ceph_encode_locks_to_buffer(inode, flocks,
3791                                                           num_fcntl_locks,
3792                                                           num_flock_locks);
3793                         if (err) {
3794                                 kfree(flocks);
3795                                 flocks = NULL;
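                                /* -ENOSPC: the lock count changed under us; recount and retry */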
3796                                 if (err == -ENOSPC)
3797                                         goto encode_again;
3798                                 goto out_err;
3799                         }
3800                 } else {
3801                         kfree(flocks);
3802                         flocks = NULL;
3803                 }
3804
3805                 if (recon_state->msg_version >= 3) {
3806                         /* version, compat_version and struct_len */
3807                         total_len += 2 * sizeof(u8) + sizeof(u32);
3808                         struct_v = 2;
3809                 }
3810                 /*
3811                  * number of encoded locks is stable, so copy to pagelist
3812                  */
3813                 struct_len = 2 * sizeof(u32) +
3814                             (num_fcntl_locks + num_flock_locks) *
3815                             sizeof(struct ceph_filelock);
3816                 rec.v2.flock_len = cpu_to_le32(struct_len);
3817
3818                 struct_len += sizeof(u32) + pathlen + sizeof(rec.v2);
3819
3820                 if (struct_v >= 2)
3821                         struct_len += sizeof(u64); /* snap_follows */
3822
3823                 total_len += struct_len;
3824
3825                 if (pagelist->length + total_len > RECONNECT_MAX_SIZE) {
3826                         err = send_reconnect_partial(recon_state);
3827                         if (err)
3828                                 goto out_freeflocks;
3829                         pagelist = recon_state->pagelist;
3830                 }
3831
3832                 err = ceph_pagelist_reserve(pagelist, total_len);
3833                 if (err)
3834                         goto out_freeflocks;
3835
3836                 ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
3837                 if (recon_state->msg_version >= 3) {
3838                         ceph_pagelist_encode_8(pagelist, struct_v);
3839                         ceph_pagelist_encode_8(pagelist, 1);
3840                         ceph_pagelist_encode_32(pagelist, struct_len);
3841                 }
3842                 ceph_pagelist_encode_string(pagelist, path, pathlen);
3843                 ceph_pagelist_append(pagelist, &rec, sizeof(rec.v2));
3844                 ceph_locks_to_pagelist(flocks, pagelist,
3845                                        num_fcntl_locks, num_flock_locks);
3846                 if (struct_v >= 2)
3847                         ceph_pagelist_encode_64(pagelist, snap_follows);
3848 out_freeflocks:
3849                 kfree(flocks);
3850         } else {
3851                 err = ceph_pagelist_reserve(pagelist,
3852                                             sizeof(u64) + sizeof(u32) +
3853                                             pathlen + sizeof(rec.v1));
3854                 if (err)
3855                         goto out_err;
3856
3857                 ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
3858                 ceph_pagelist_encode_string(pagelist, path, pathlen);
3859                 ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1));
3860         }
3861
3862 out_err:
3863         ceph_mdsc_free_path(path, pathlen);
3864         if (!err)
3865                 recon_state->nr_caps++;
3866         return err;
3867 }
3868
3869 static int encode_snap_realms(struct ceph_mds_client *mdsc,
3870                               struct ceph_reconnect_state *recon_state)
3871 {
3872         struct rb_node *p;
3873         struct ceph_pagelist *pagelist = recon_state->pagelist;
3874         int err = 0;
3875
3876         if (recon_state->msg_version >= 4) {
3877                 err = ceph_pagelist_encode_32(pagelist, mdsc->num_snap_realms);
3878                 if (err < 0)
3879                         goto fail;
3880         }
3881
3882         /*
3883          * snaprealms.  we provide mds with the ino, seq (version), and
3884          * parent for all of our realms.  If the mds has any newer info,
3885          * it will tell us.
3886          */
3887         for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
3888                 struct ceph_snap_realm *realm =
3889                        rb_entry(p, struct ceph_snap_realm, node);
3890                 struct ceph_mds_snaprealm_reconnect sr_rec;
3891
3892                 if (recon_state->msg_version >= 4) {
3893                         size_t need = sizeof(u8) * 2 + sizeof(u32) +
3894                                       sizeof(sr_rec);
3895
3896                         if (pagelist->length + need > RECONNECT_MAX_SIZE) {
3897                                 err = send_reconnect_partial(recon_state);
3898                                 if (err)
3899                                         goto fail;
3900                                 pagelist = recon_state->pagelist;
3901                         }
3902
3903                         err = ceph_pagelist_reserve(pagelist, need);
3904                         if (err)
3905                                 goto fail;
3906
3907                         ceph_pagelist_encode_8(pagelist, 1);
3908                         ceph_pagelist_encode_8(pagelist, 1);
3909                         ceph_pagelist_encode_32(pagelist, sizeof(sr_rec));
3910                 }
3911
3912                 dout(" adding snap realm %llx seq %lld parent %llx\n",
3913                      realm->ino, realm->seq, realm->parent_ino);
3914                 sr_rec.ino = cpu_to_le64(realm->ino);
3915                 sr_rec.seq = cpu_to_le64(realm->seq);
3916                 sr_rec.parent = cpu_to_le64(realm->parent_ino);
3917
3918                 err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec));
3919                 if (err)
3920                         goto fail;
3921
3922                 recon_state->nr_realms++;
3923         }
3924 fail:
3925         return err;
3926 }
3927
3928
3929 /*
3930  * If an MDS fails and recovers, clients need to reconnect in order to
3931  * reestablish shared state.  This includes all caps issued through
3932  * this session _and_ the snap_realm hierarchy.  Because it's not
3933  * clear which snap realms the mds cares about, we send everything we
3934  * know about.. that ensures we'll then get any new info the
3935  * know about; that ensures we'll then get any new info the
3936  *
3937  * This is a relatively heavyweight operation, but it's rare.
3938  */
3939 static void send_mds_reconnect(struct ceph_mds_client *mdsc,
3940                                struct ceph_mds_session *session)
3941 {
3942         struct ceph_msg *reply;
3943         int mds = session->s_mds;
3944         int err = -ENOMEM;
3945         struct ceph_reconnect_state recon_state = {
3946                 .session = session,
3947         };
3948         LIST_HEAD(dispose);
3949
3950         pr_info("mds%d reconnect start\n", mds);
3951
3952         recon_state.pagelist = ceph_pagelist_alloc(GFP_NOFS);
3953         if (!recon_state.pagelist)
3954                 goto fail_nopagelist;
3955
3956         reply = ceph_msg_new2(CEPH_MSG_CLIENT_RECONNECT, 0, 1, GFP_NOFS, false);
3957         if (!reply)
3958                 goto fail_nomsg;
3959
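        /* drop inode numbers the MDS delegated to us; they were tied to the old session state */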
3960         xa_destroy(&session->s_delegated_inos);
3961
3962         mutex_lock(&session->s_mutex);
3963         session->s_state = CEPH_MDS_SESSION_RECONNECTING;
3964         session->s_seq = 0;
3965
3966         dout("session %p state %s\n", session,
3967              ceph_session_state_name(session->s_state));
3968
3969         spin_lock(&session->s_gen_ttl_lock);
3970         session->s_cap_gen++;
3971         spin_unlock(&session->s_gen_ttl_lock);
3972
3973         spin_lock(&session->s_cap_lock);
3974         /* don't know if session is readonly */
3975         session->s_readonly = 0;
3976         /*
3977          * notify __ceph_remove_cap() that we are composing cap reconnect.
3978          * If a cap gets released before being added to the cap reconnect,
3979          * __ceph_remove_cap() should skip queuing the cap release.
3980          */
3981         session->s_cap_reconnect = 1;
3982         /* drop old cap expires; we're about to reestablish that state */
3983         detach_cap_releases(session, &dispose);
3984         spin_unlock(&session->s_cap_lock);
3985         dispose_cap_releases(mdsc, &dispose);
3986
3987         /* trim unused caps to reduce MDS's cache rejoin time */
3988         if (mdsc->fsc->sb->s_root)
3989                 shrink_dcache_parent(mdsc->fsc->sb->s_root);
3990
3991         ceph_con_close(&session->s_con);
3992         ceph_con_open(&session->s_con,
3993                       CEPH_ENTITY_TYPE_MDS, mds,
3994                       ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
3995
3996         /* replay unsafe requests */
3997         replay_unsafe_requests(mdsc, session);
3998
3999         ceph_early_kick_flushing_caps(mdsc, session);
4000
4001         down_read(&mdsc->snap_rwsem);
4002
4003         /* placeholder for nr_caps */
4004         err = ceph_pagelist_encode_32(recon_state.pagelist, 0);
4005         if (err)
4006                 goto fail;
4007
4008         if (test_bit(CEPHFS_FEATURE_MULTI_RECONNECT, &session->s_features)) {
4009                 recon_state.msg_version = 3;
4010                 recon_state.allow_multi = true;
4011         } else if (session->s_con.peer_features & CEPH_FEATURE_MDSENC) {
4012                 recon_state.msg_version = 3;
4013         } else {
4014                 recon_state.msg_version = 2;
4015         }
4016         /* traverse this session's caps */
4017         err = ceph_iterate_session_caps(session, reconnect_caps_cb, &recon_state);
4018
4019         spin_lock(&session->s_cap_lock);
4020         session->s_cap_reconnect = 0;
4021         spin_unlock(&session->s_cap_lock);
4022
4023         if (err < 0)
4024                 goto fail;
4025
4026         /* check if all realms can be encoded into current message */
4027         if (mdsc->num_snap_realms) {
4028                 size_t total_len =
4029                         recon_state.pagelist->length +
4030                         mdsc->num_snap_realms *
4031                         sizeof(struct ceph_mds_snaprealm_reconnect);
4032                 if (recon_state.msg_version >= 4) {
4033                         /* number of realms */
4034                         total_len += sizeof(u32);
4035                         /* version, compat_version and struct_len */
4036                         total_len += mdsc->num_snap_realms *
4037                                      (2 * sizeof(u8) + sizeof(u32));
4038                 }
4039                 if (total_len > RECONNECT_MAX_SIZE) {
4040                         if (!recon_state.allow_multi) {
4041                                 err = -ENOSPC;
4042                                 goto fail;
4043                         }
4044                         if (recon_state.nr_caps) {
4045                                 err = send_reconnect_partial(&recon_state);
4046                                 if (err)
4047                                         goto fail;
4048                         }
4049                         recon_state.msg_version = 5;
4050                 }
4051         }
4052
4053         err = encode_snap_realms(mdsc, &recon_state);
4054         if (err < 0)
4055                 goto fail;
4056
4057         if (recon_state.msg_version >= 5) {
4058                 err = ceph_pagelist_encode_8(recon_state.pagelist, 0);
4059                 if (err < 0)
4060                         goto fail;
4061         }
4062
4063         if (recon_state.nr_caps || recon_state.nr_realms) {
4064                 struct page *page =
4065                         list_first_entry(&recon_state.pagelist->head,
4066                                         struct page, lru);
4067                 __le32 *addr = kmap_atomic(page);
4068                 if (recon_state.nr_caps) {
4069                         WARN_ON(recon_state.nr_realms != mdsc->num_snap_realms);
4070                         *addr = cpu_to_le32(recon_state.nr_caps);
4071                 } else if (recon_state.msg_version >= 4) {
4072                         *(addr + 1) = cpu_to_le32(recon_state.nr_realms);
4073                 }
4074                 kunmap_atomic(addr);
4075         }
4076
4077         reply->hdr.version = cpu_to_le16(recon_state.msg_version);
4078         if (recon_state.msg_version >= 4)
4079                 reply->hdr.compat_version = cpu_to_le16(4);
4080
4081         reply->hdr.data_len = cpu_to_le32(recon_state.pagelist->length);
4082         ceph_msg_data_add_pagelist(reply, recon_state.pagelist);
4083
4084         ceph_con_send(&session->s_con, reply);
4085
4086         mutex_unlock(&session->s_mutex);
4087
4088         mutex_lock(&mdsc->mutex);
4089         __wake_requests(mdsc, &session->s_waiting);
4090         mutex_unlock(&mdsc->mutex);
4091
4092         up_read(&mdsc->snap_rwsem);
4093         ceph_pagelist_release(recon_state.pagelist);
4094         return;
4095
4096 fail:
4097         ceph_msg_put(reply);
4098         up_read(&mdsc->snap_rwsem);
4099         mutex_unlock(&session->s_mutex);
4100 fail_nomsg:
4101         ceph_pagelist_release(recon_state.pagelist);
4102 fail_nopagelist:
4103         pr_err("error %d preparing reconnect for mds%d\n", err, mds);
4104         return;
4105 }
4106
4107
4108 /*
4109  * compare old and new mdsmaps, kicking requests
4110  * and closing out old connections as necessary
4111  *
4112  * called under mdsc->mutex.
4113  */
4114 static void check_new_map(struct ceph_mds_client *mdsc,
4115                           struct ceph_mdsmap *newmap,
4116                           struct ceph_mdsmap *oldmap)
4117 {
4118         int i;
4119         int oldstate, newstate;
4120         struct ceph_mds_session *s;
4121
4122         dout("check_new_map new %u old %u\n",
4123              newmap->m_epoch, oldmap->m_epoch);
4124
4125         for (i = 0; i < oldmap->possible_max_rank && i < mdsc->max_sessions; i++) {
4126                 if (!mdsc->sessions[i])
4127                         continue;
4128                 s = mdsc->sessions[i];
4129                 oldstate = ceph_mdsmap_get_state(oldmap, i);
4130                 newstate = ceph_mdsmap_get_state(newmap, i);
4131
4132                 dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n",
4133                      i, ceph_mds_state_name(oldstate),
4134                      ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "",
4135                      ceph_mds_state_name(newstate),
4136                      ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
4137                      ceph_session_state_name(s->s_state));
4138
4139                 if (i >= newmap->possible_max_rank) {
4140                         /* force close session for stopped mds */
4141                         ceph_get_mds_session(s);
4142                         __unregister_session(mdsc, s);
4143                         __wake_requests(mdsc, &s->s_waiting);
4144                         mutex_unlock(&mdsc->mutex);
4145
4146                         mutex_lock(&s->s_mutex);
4147                         cleanup_session_requests(mdsc, s);
4148                         remove_session_caps(s);
4149                         mutex_unlock(&s->s_mutex);
4150
4151                         ceph_put_mds_session(s);
4152
4153                         mutex_lock(&mdsc->mutex);
4154                         kick_requests(mdsc, i);
4155                         continue;
4156                 }
4157
4158                 if (memcmp(ceph_mdsmap_get_addr(oldmap, i),
4159                            ceph_mdsmap_get_addr(newmap, i),
4160                            sizeof(struct ceph_entity_addr))) {
4161                         /* just close it */
4162                         mutex_unlock(&mdsc->mutex);
4163                         mutex_lock(&s->s_mutex);
4164                         mutex_lock(&mdsc->mutex);
4165                         ceph_con_close(&s->s_con);
4166                         mutex_unlock(&s->s_mutex);
4167                         s->s_state = CEPH_MDS_SESSION_RESTARTING;
4168                 } else if (oldstate == newstate) {
4169                         continue;  /* nothing new with this mds */
4170                 }
4171
4172                 /*
4173                  * send reconnect?
4174                  */
4175                 if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
4176                     newstate >= CEPH_MDS_STATE_RECONNECT) {
4177                         mutex_unlock(&mdsc->mutex);
4178                         send_mds_reconnect(mdsc, s);
4179                         mutex_lock(&mdsc->mutex);
4180                 }
4181
4182                 /*
4183                  * kick requests on any mds that has gone active.
4184                  */
4185                 if (oldstate < CEPH_MDS_STATE_ACTIVE &&
4186                     newstate >= CEPH_MDS_STATE_ACTIVE) {
4187                         if (oldstate != CEPH_MDS_STATE_CREATING &&
4188                             oldstate != CEPH_MDS_STATE_STARTING)
4189                                 pr_info("mds%d recovery completed\n", s->s_mds);
4190                         kick_requests(mdsc, i);
4191                         mutex_unlock(&mdsc->mutex);
4192                         mutex_lock(&s->s_mutex);
4193                         mutex_lock(&mdsc->mutex);
4194                         ceph_kick_flushing_caps(mdsc, s);
4195                         mutex_unlock(&s->s_mutex);
4196                         wake_up_session_caps(s, RECONNECT);
4197                 }
4198         }
4199
4200         for (i = 0; i < newmap->possible_max_rank && i < mdsc->max_sessions; i++) {
4201                 s = mdsc->sessions[i];
4202                 if (!s)
4203                         continue;
4204                 if (!ceph_mdsmap_is_laggy(newmap, i))
4205                         continue;
4206                 if (s->s_state == CEPH_MDS_SESSION_OPEN ||
4207                     s->s_state == CEPH_MDS_SESSION_HUNG ||
4208                     s->s_state == CEPH_MDS_SESSION_CLOSING) {
4209                         dout(" connecting to export targets of laggy mds%d\n",
4210                              i);
4211                         __open_export_target_sessions(mdsc, s);
4212                 }
4213         }
4214 }
4215
4216
4217
4218 /*
4219  * leases
4220  */
4221
4222 /*
4223  * caller must hold session s_mutex, dentry->d_lock
4224  */
4225 void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry)
4226 {
4227         struct ceph_dentry_info *di = ceph_dentry(dentry);
4228
4229         ceph_put_mds_session(di->lease_session);
4230         di->lease_session = NULL;
4231 }
4232
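/*
 * Handle a dentry lease message from the MDS: drop the named dentry's
 * lease on a revoke (and ack it), or record a successful renewal.
 */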
4233 static void handle_lease(struct ceph_mds_client *mdsc,
4234                          struct ceph_mds_session *session,
4235                          struct ceph_msg *msg)
4236 {
4237         struct super_block *sb = mdsc->fsc->sb;
4238         struct inode *inode;
4239         struct dentry *parent, *dentry;
4240         struct ceph_dentry_info *di;
4241         int mds = session->s_mds;
4242         struct ceph_mds_lease *h = msg->front.iov_base;
4243         u32 seq;
4244         struct ceph_vino vino;
4245         struct qstr dname;
4246         int release = 0;
4247
4248         dout("handle_lease from mds%d\n", mds);
4249
4250         /* decode */
4251         if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
4252                 goto bad;
4253         vino.ino = le64_to_cpu(h->ino);
4254         vino.snap = CEPH_NOSNAP;
4255         seq = le32_to_cpu(h->seq);
4256         dname.len = get_unaligned_le32(h + 1);
4257         if (msg->front.iov_len < sizeof(*h) + sizeof(u32) + dname.len)
4258                 goto bad;
4259         dname.name = (void *)(h + 1) + sizeof(u32);
4260
4261         /* lookup inode */
4262         inode = ceph_find_inode(sb, vino);
4263         dout("handle_lease %s, ino %llx %p %.*s\n",
4264              ceph_lease_op_name(h->action), vino.ino, inode,
4265              dname.len, dname.name);
4266
4267         mutex_lock(&session->s_mutex);
4268         inc_session_sequence(session);
4269
4270         if (!inode) {
4271                 dout("handle_lease no inode %llx\n", vino.ino);
4272                 goto release;
4273         }
4274
4275         /* dentry */
4276         parent = d_find_alias(inode);
4277         if (!parent) {
4278                 dout("no parent dentry on inode %p\n", inode);
4279                 WARN_ON(1);
4280                 goto release;  /* hrm... */
4281         }
4282         dname.hash = full_name_hash(parent, dname.name, dname.len);
4283         dentry = d_lookup(parent, &dname);
4284         dput(parent);
4285         if (!dentry)
4286                 goto release;
4287
4288         spin_lock(&dentry->d_lock);
4289         di = ceph_dentry(dentry);
4290         switch (h->action) {
4291         case CEPH_MDS_LEASE_REVOKE:
4292                 if (di->lease_session == session) {
4293                         if (ceph_seq_cmp(di->lease_seq, seq) > 0)
4294                                 h->seq = cpu_to_le32(di->lease_seq);
4295                         __ceph_mdsc_drop_dentry_lease(dentry);
4296                 }
4297                 release = 1;
4298                 break;
4299
4300         case CEPH_MDS_LEASE_RENEW:
4301                 if (di->lease_session == session &&
4302                     di->lease_gen == session->s_cap_gen &&
4303                     di->lease_renew_from &&
4304                     di->lease_renew_after == 0) {
4305                         unsigned long duration =
4306                                 msecs_to_jiffies(le32_to_cpu(h->duration_ms));
4307
4308                         di->lease_seq = seq;
4309                         di->time = di->lease_renew_from + duration;
4310                         di->lease_renew_after = di->lease_renew_from +
4311                                 (duration >> 1);
4312                         di->lease_renew_from = 0;
4313                 }
4314                 break;
4315         }
4316         spin_unlock(&dentry->d_lock);
4317         dput(dentry);
4318
4319         if (!release)
4320                 goto out;
4321
4322 release:
4323         /* let's just reuse the same message */
4324         h->action = CEPH_MDS_LEASE_REVOKE_ACK;
4325         ceph_msg_get(msg);
4326         ceph_con_send(&session->s_con, msg);
4327
4328 out:
4329         mutex_unlock(&session->s_mutex);
4330         /* avoid calling iput_final() in mds dispatch threads */
4331         ceph_async_iput(inode);
4332         return;
4333
4334 bad:
4335         pr_err("corrupt lease message\n");
4336         ceph_msg_dump(msg);
4337 }
4338
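/*
 * Send a lease message for @dentry to the MDS.  The message body is a
 * struct ceph_mds_lease followed by a __le32 name length and the dentry
 * name bytes, hence the sizeof(*lease) + sizeof(u32) + NAME_MAX buffer.
 */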
4339 void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
4340                               struct dentry *dentry, char action,
4341                               u32 seq)
4342 {
4343         struct ceph_msg *msg;
4344         struct ceph_mds_lease *lease;
4345         struct inode *dir;
4346         int len = sizeof(*lease) + sizeof(u32) + NAME_MAX;
4347
4348         dout("lease_send_msg dentry %p %s to mds%d\n",
4349              dentry, ceph_lease_op_name(action), session->s_mds);
4350
4351         msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false);
4352         if (!msg)
4353                 return;
4354         lease = msg->front.iov_base;
4355         lease->action = action;
4356         lease->seq = cpu_to_le32(seq);
4357
4358         spin_lock(&dentry->d_lock);
4359         dir = d_inode(dentry->d_parent);
4360         lease->ino = cpu_to_le64(ceph_ino(dir));
4361         lease->first = lease->last = cpu_to_le64(ceph_snap(dir));
4362
4363         put_unaligned_le32(dentry->d_name.len, lease + 1);
4364         memcpy((void *)(lease + 1) + 4,
4365                dentry->d_name.name, dentry->d_name.len);
4366         spin_unlock(&dentry->d_lock);
4367         /*
4368          * if this is a preemptive lease RELEASE, no need to
4369          * flush request stream, since the actual request will
4370          * soon follow.
4371          */
4372         msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE);
4373
4374         ceph_con_send(&session->s_con, msg);
4375 }
4376
4377 /*
4378  * lock and unlock each session in turn, to wait for ongoing session activity
4379  */
4380 static void lock_unlock_sessions(struct ceph_mds_client *mdsc)
4381 {
4382         int i;
4383
4384         mutex_lock(&mdsc->mutex);
4385         for (i = 0; i < mdsc->max_sessions; i++) {
4386                 struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
4387                 if (!s)
4388                         continue;
4389                 mutex_unlock(&mdsc->mutex);
4390                 mutex_lock(&s->s_mutex);
4391                 mutex_unlock(&s->s_mutex);
4392                 ceph_put_mds_session(s);
4393                 mutex_lock(&mdsc->mutex);
4394         }
4395         mutex_unlock(&mdsc->mutex);
4396 }
4397
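/*
 * If we have been blocklisted and the CLEANRECOVER mount option is set,
 * force a reconnect to the cluster, but at most once every 30 minutes.
 */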
4398 static void maybe_recover_session(struct ceph_mds_client *mdsc)
4399 {
4400         struct ceph_fs_client *fsc = mdsc->fsc;
4401
4402         if (!ceph_test_mount_opt(fsc, CLEANRECOVER))
4403                 return;
4404
4405         if (READ_ONCE(fsc->mount_state) != CEPH_MOUNT_MOUNTED)
4406                 return;
4407
4408         if (!READ_ONCE(fsc->blocklisted))
4409                 return;
4410
4411         if (fsc->last_auto_reconnect &&
4412             time_before(jiffies, fsc->last_auto_reconnect + HZ * 60 * 30))
4413                 return;
4414
4415         pr_info("auto reconnect after blocklisted\n");
4416         fsc->last_auto_reconnect = jiffies;
4417         ceph_force_reconnect(fsc->sb);
4418 }
4419
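/*
 * Return true if the session is still usable (OPEN or HUNG), marking an
 * OPEN session HUNG once its ttl has expired.  Return false for NEW,
 * RESTARTING, CLOSING, CLOSED and REJECTED sessions.
 */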
4420 bool check_session_state(struct ceph_mds_session *s)
4421 {
4422         switch (s->s_state) {
4423         case CEPH_MDS_SESSION_OPEN:
4424                 if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
4425                         s->s_state = CEPH_MDS_SESSION_HUNG;
4426                         pr_info("mds%d hung\n", s->s_mds);
4427                 }
4428                 break;
4429         case CEPH_MDS_SESSION_CLOSING:
4430                 /* Should never reach this when we're unmounting */
4431                 WARN_ON_ONCE(s->s_ttl);
4432                 fallthrough;
4433         case CEPH_MDS_SESSION_NEW:
4434         case CEPH_MDS_SESSION_RESTARTING:
4435         case CEPH_MDS_SESSION_CLOSED:
4436         case CEPH_MDS_SESSION_REJECTED:
4437                 return false;
4438         }
4439
4440         return true;
4441 }
4442
4443 /*
4444  * If the sequence is incremented while we're waiting on a REQUEST_CLOSE reply,
4445  * then we need to retransmit that request.
4446  */
4447 void inc_session_sequence(struct ceph_mds_session *s)
4448 {
4449         lockdep_assert_held(&s->s_mutex);
4450
4451         s->s_seq++;
4452
4453         if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
4454                 int ret;
4455
4456                 dout("resending session close request for mds%d\n", s->s_mds);
4457                 ret = request_close_session(s);
4458                 if (ret < 0)
4459                         pr_err("unable to close session to mds%d: %d\n",
4460                                s->s_mds, ret);
4461         }
4462 }
4463
4464 /*
4465  * delayed work -- periodically trim expired leases, renew caps with mds.  If
4466  * the @delay parameter is set to 0 or if it's more than 5 secs, the default
4467  * workqueue delay value of 5 secs will be used.
4468  */
4469 static void schedule_delayed(struct ceph_mds_client *mdsc, unsigned long delay)
4470 {
4471         unsigned long max_delay = HZ * 5;
4472
4473         /* 5 secs default delay */
4474         if (!delay || (delay > max_delay))
4475                 delay = max_delay;
4476         schedule_delayed_work(&mdsc->delayed_work,
4477                               round_jiffies_relative(delay));
4478 }
4479
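/*
 * Periodic housekeeping: renew caps (roughly every quarter of the mds
 * session timeout) or send keepalives, send queued cap releases, check
 * delayed caps, trim the snapid map, maybe auto-reconnect, and re-arm
 * the work via schedule_delayed().
 */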
4480 static void delayed_work(struct work_struct *work)
4481 {
4482         struct ceph_mds_client *mdsc =
4483                 container_of(work, struct ceph_mds_client, delayed_work.work);
4484         unsigned long delay;
4485         int renew_interval;
4486         int renew_caps;
4487         int i;
4488
4489         dout("mdsc delayed_work\n");
4490
4491         if (mdsc->stopping)
4492                 return;
4493
4494         mutex_lock(&mdsc->mutex);
4495         renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
4496         renew_caps = time_after_eq(jiffies, HZ*renew_interval +
4497                                    mdsc->last_renew_caps);
4498         if (renew_caps)
4499                 mdsc->last_renew_caps = jiffies;
4500
4501         for (i = 0; i < mdsc->max_sessions; i++) {
4502                 struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
4503                 if (!s)
4504                         continue;
4505
4506                 if (!check_session_state(s)) {
4507                         ceph_put_mds_session(s);
4508                         continue;
4509                 }
4510                 mutex_unlock(&mdsc->mutex);
4511
4512                 mutex_lock(&s->s_mutex);
4513                 if (renew_caps)
4514                         send_renew_caps(mdsc, s);
4515                 else
4516                         ceph_con_keepalive(&s->s_con);
4517                 if (s->s_state == CEPH_MDS_SESSION_OPEN ||
4518                     s->s_state == CEPH_MDS_SESSION_HUNG)
4519                         ceph_send_cap_releases(mdsc, s);
4520                 mutex_unlock(&s->s_mutex);
4521                 ceph_put_mds_session(s);
4522
4523                 mutex_lock(&mdsc->mutex);
4524         }
4525         mutex_unlock(&mdsc->mutex);
4526
4527         delay = ceph_check_delayed_caps(mdsc);
4528
4529         ceph_queue_cap_reclaim_work(mdsc);
4530
4531         ceph_trim_snapid_map(mdsc);
4532
4533         maybe_recover_session(mdsc);
4534
4535         schedule_delayed(mdsc, delay);
4536 }
4537
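/*
 * Allocate and initialize the mds client for this fs client: an empty
 * mdsmap, the session table, snap and cap bookkeeping, the delayed work
 * and the metrics.  Called at mount time.
 */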
4538 int ceph_mdsc_init(struct ceph_fs_client *fsc)
4540 {
4541         struct ceph_mds_client *mdsc;
4542         int err;
4543
4544         mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS);
4545         if (!mdsc)
4546                 return -ENOMEM;
4547         mdsc->fsc = fsc;
4548         mutex_init(&mdsc->mutex);
4549         mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
4550         if (!mdsc->mdsmap) {
4551                 err = -ENOMEM;
4552                 goto err_mdsc;
4553         }
4554
4555         init_completion(&mdsc->safe_umount_waiters);
4556         init_waitqueue_head(&mdsc->session_close_wq);
4557         INIT_LIST_HEAD(&mdsc->waiting_for_map);
4558         mdsc->sessions = NULL;
4559         atomic_set(&mdsc->num_sessions, 0);
4560         mdsc->max_sessions = 0;
4561         mdsc->stopping = 0;
4562         atomic64_set(&mdsc->quotarealms_count, 0);
4563         mdsc->quotarealms_inodes = RB_ROOT;
4564         mutex_init(&mdsc->quotarealms_inodes_mutex);
4565         mdsc->last_snap_seq = 0;
4566         init_rwsem(&mdsc->snap_rwsem);
4567         mdsc->snap_realms = RB_ROOT;
4568         INIT_LIST_HEAD(&mdsc->snap_empty);
4569         mdsc->num_snap_realms = 0;
4570         spin_lock_init(&mdsc->snap_empty_lock);
4571         mdsc->last_tid = 0;
4572         mdsc->oldest_tid = 0;
4573         mdsc->request_tree = RB_ROOT;
4574         INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
4575         mdsc->last_renew_caps = jiffies;
4576         INIT_LIST_HEAD(&mdsc->cap_delay_list);
4577         INIT_LIST_HEAD(&mdsc->cap_wait_list);
4578         spin_lock_init(&mdsc->cap_delay_lock);
4579         INIT_LIST_HEAD(&mdsc->snap_flush_list);
4580         spin_lock_init(&mdsc->snap_flush_lock);
4581         mdsc->last_cap_flush_tid = 1;
4582         INIT_LIST_HEAD(&mdsc->cap_flush_list);
4583         INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
4584         mdsc->num_cap_flushing = 0;
4585         spin_lock_init(&mdsc->cap_dirty_lock);
4586         init_waitqueue_head(&mdsc->cap_flushing_wq);
4587         INIT_WORK(&mdsc->cap_reclaim_work, ceph_cap_reclaim_work);
4588         atomic_set(&mdsc->cap_reclaim_pending, 0);
4589         err = ceph_metric_init(&mdsc->metric);
4590         if (err)
4591                 goto err_mdsmap;
4592
4593         spin_lock_init(&mdsc->dentry_list_lock);
4594         INIT_LIST_HEAD(&mdsc->dentry_leases);
4595         INIT_LIST_HEAD(&mdsc->dentry_dir_leases);
4596
4597         ceph_caps_init(mdsc);
4598         ceph_adjust_caps_max_min(mdsc, fsc->mount_options);
4599
4600         spin_lock_init(&mdsc->snapid_map_lock);
4601         mdsc->snapid_map_tree = RB_ROOT;
4602         INIT_LIST_HEAD(&mdsc->snapid_map_lru);
4603
4604         init_rwsem(&mdsc->pool_perm_rwsem);
4605         mdsc->pool_perm_tree = RB_ROOT;
4606
4607         strscpy(mdsc->nodename, utsname()->nodename,
4608                 sizeof(mdsc->nodename));
4609
4610         fsc->mdsc = mdsc;
4611         return 0;
4612
4613 err_mdsmap:
4614         kfree(mdsc->mdsmap);
4615 err_mdsc:
4616         kfree(mdsc);
4617         return err;
4618 }
4619
4620 /*
4621  * Wait for safe replies on open mds requests.  If we time out, drop
4622  * all requests from the tree to avoid dangling dentry refs.
4623  */
4624 static void wait_requests(struct ceph_mds_client *mdsc)
4625 {
4626         struct ceph_options *opts = mdsc->fsc->client->options;
4627         struct ceph_mds_request *req;
4628
4629         mutex_lock(&mdsc->mutex);
4630         if (__get_oldest_req(mdsc)) {
4631                 mutex_unlock(&mdsc->mutex);
4632
4633                 dout("wait_requests waiting for requests\n");
4634                 wait_for_completion_timeout(&mdsc->safe_umount_waiters,
4635                                     ceph_timeout_jiffies(opts->mount_timeout));
4636
4637                 /* tear down remaining requests */
4638                 mutex_lock(&mdsc->mutex);
4639                 while ((req = __get_oldest_req(mdsc))) {
4640                         dout("wait_requests timed out on tid %llu\n",
4641                              req->r_tid);
4642                         list_del_init(&req->r_wait);
4643                         __unregister_request(mdsc, req);
4644                 }
4645         }
4646         mutex_unlock(&mdsc->mutex);
4647         dout("wait_requests done\n");
4648 }
4649
4650 /*
4651  * called before the mount goes read-only, and before dentries are torn down.
4652  * (hmm, does this still race with new lookups?)
4653  */
4654 void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
4655 {
4656         dout("pre_umount\n");
4657         mdsc->stopping = 1;
4658
4659         lock_unlock_sessions(mdsc);
4660         ceph_flush_dirty_caps(mdsc);
4661         wait_requests(mdsc);
4662
4663         /*
4664          * wait for reply handlers to drop their request refs and
4665          * their inode/dcache refs
4666          */
4667         ceph_msgr_flush();
4668
4669         ceph_cleanup_quotarealms_inodes(mdsc);
4670 }
4671
4672 /*
4673  * wait for all write mds requests to flush.
4674  */
4675 static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
4676 {
4677         struct ceph_mds_request *req = NULL, *nextreq;
4678         struct rb_node *n;
4679
4680         mutex_lock(&mdsc->mutex);
4681         dout("wait_unsafe_requests want %lld\n", want_tid);
4682 restart:
4683         req = __get_oldest_req(mdsc);
4684         while (req && req->r_tid <= want_tid) {
4685                 /* find next request */
4686                 n = rb_next(&req->r_node);
4687                 if (n)
4688                         nextreq = rb_entry(n, struct ceph_mds_request, r_node);
4689                 else
4690                         nextreq = NULL;
4691                 if (req->r_op != CEPH_MDS_OP_SETFILELOCK &&
4692                     (req->r_op & CEPH_MDS_OP_WRITE)) {
4693                         /* write op */
4694                         ceph_mdsc_get_request(req);
4695                         if (nextreq)
4696                                 ceph_mdsc_get_request(nextreq);
4697                         mutex_unlock(&mdsc->mutex);
4698                         dout("wait_unsafe_requests  wait on %llu (want %llu)\n",
4699                              req->r_tid, want_tid);
4700                         wait_for_completion(&req->r_safe_completion);
4701                         mutex_lock(&mdsc->mutex);
4702                         ceph_mdsc_put_request(req);
4703                         if (!nextreq)
4704                                 break;  /* there was no next request, so we're done */
4705                         if (RB_EMPTY_NODE(&nextreq->r_node)) {
4706                                 /* next request was removed from tree */
4707                                 ceph_mdsc_put_request(nextreq);
4708                                 goto restart;
4709                         }
4710                         ceph_mdsc_put_request(nextreq);  /* won't go away */
4711                 }
4712                 req = nextreq;
4713         }
4714         mutex_unlock(&mdsc->mutex);
4715         dout("wait_unsafe_requests done\n");
4716 }
4717
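/*
 * Flush dirty caps, then wait for all write MDS requests up to the
 * current tid to become safe and for cap flushes up to the current
 * flush tid to be acked.
 */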
4718 void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
4719 {
4720         u64 want_tid, want_flush;
4721
4722         if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
4723                 return;
4724
4725         dout("sync\n");
4726         mutex_lock(&mdsc->mutex);
4727         want_tid = mdsc->last_tid;
4728         mutex_unlock(&mdsc->mutex);
4729
4730         ceph_flush_dirty_caps(mdsc);
4731         spin_lock(&mdsc->cap_dirty_lock);
4732         want_flush = mdsc->last_cap_flush_tid;
4733         if (!list_empty(&mdsc->cap_flush_list)) {
4734                 struct ceph_cap_flush *cf =
4735                         list_last_entry(&mdsc->cap_flush_list,
4736                                         struct ceph_cap_flush, g_list);
4737                 cf->wake = true;
4738         }
4739         spin_unlock(&mdsc->cap_dirty_lock);
4740
4741         dout("sync want tid %lld flush_seq %lld\n",
4742              want_tid, want_flush);
4743
4744         wait_unsafe_requests(mdsc, want_tid);
4745         wait_caps_flush(mdsc, want_flush);
4746 }
4747
4748 /*
4749  * true if all sessions are closed, or we force unmount
4750  */
4751 static bool done_closing_sessions(struct ceph_mds_client *mdsc, int skipped)
4752 {
4753         if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
4754                 return true;
4755         return atomic_read(&mdsc->num_sessions) <= skipped;
4756 }
4757
4758 /*
4759  * called after sb is ro.
4760  */
4761 void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
4762 {
4763         struct ceph_options *opts = mdsc->fsc->client->options;
4764         struct ceph_mds_session *session;
4765         int i;
4766         int skipped = 0;
4767
4768         dout("close_sessions\n");
4769
4770         /* close sessions */
4771         mutex_lock(&mdsc->mutex);
4772         for (i = 0; i < mdsc->max_sessions; i++) {
4773                 session = __ceph_lookup_mds_session(mdsc, i);
4774                 if (!session)
4775                         continue;
4776                 mutex_unlock(&mdsc->mutex);
4777                 mutex_lock(&session->s_mutex);
4778                 if (__close_session(mdsc, session) <= 0)
4779                         skipped++;
4780                 mutex_unlock(&session->s_mutex);
4781                 ceph_put_mds_session(session);
4782                 mutex_lock(&mdsc->mutex);
4783         }
4784         mutex_unlock(&mdsc->mutex);
4785
4786         dout("waiting for sessions to close\n");
4787         wait_event_timeout(mdsc->session_close_wq,
4788                            done_closing_sessions(mdsc, skipped),
4789                            ceph_timeout_jiffies(opts->mount_timeout));
4790
4791         /* tear down remaining sessions */
4792         mutex_lock(&mdsc->mutex);
4793         for (i = 0; i < mdsc->max_sessions; i++) {
4794                 if (mdsc->sessions[i]) {
4795                         session = ceph_get_mds_session(mdsc->sessions[i]);
4796                         __unregister_session(mdsc, session);
4797                         mutex_unlock(&mdsc->mutex);
4798                         mutex_lock(&session->s_mutex);
4799                         remove_session_caps(session);
4800                         mutex_unlock(&session->s_mutex);
4801                         ceph_put_mds_session(session);
4802                         mutex_lock(&mdsc->mutex);
4803                 }
4804         }
4805         WARN_ON(!list_empty(&mdsc->cap_delay_list));
4806         mutex_unlock(&mdsc->mutex);
4807
4808         ceph_cleanup_snapid_map(mdsc);
4809         ceph_cleanup_empty_realms(mdsc);
4810
4811         cancel_work_sync(&mdsc->cap_reclaim_work);
4812         cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
4813
4814         dout("stopped\n");
4815 }
4816
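/*
 * Forced unmount: close all sessions, clean up their requests and caps,
 * and wake anyone waiting on those sessions or on a new mdsmap so they
 * can error out.
 */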
4817 void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
4818 {
4819         struct ceph_mds_session *session;
4820         int mds;
4821
4822         dout("force umount\n");
4823
4824         mutex_lock(&mdsc->mutex);
4825         for (mds = 0; mds < mdsc->max_sessions; mds++) {
4826                 session = __ceph_lookup_mds_session(mdsc, mds);
4827                 if (!session)
4828                         continue;
4829
4830                 if (session->s_state == CEPH_MDS_SESSION_REJECTED)
4831                         __unregister_session(mdsc, session);
4832                 __wake_requests(mdsc, &session->s_waiting);
4833                 mutex_unlock(&mdsc->mutex);
4834
4835                 mutex_lock(&session->s_mutex);
4836                 __close_session(mdsc, session);
4837                 if (session->s_state == CEPH_MDS_SESSION_CLOSING) {
4838                         cleanup_session_requests(mdsc, session);
4839                         remove_session_caps(session);
4840                 }
4841                 mutex_unlock(&session->s_mutex);
4842                 ceph_put_mds_session(session);
4843
4844                 mutex_lock(&mdsc->mutex);
4845                 kick_requests(mdsc, mds);
4846         }
4847         __wake_requests(mdsc, &mdsc->waiting_for_map);
4848         mutex_unlock(&mdsc->mutex);
4849 }
4850
4851 static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
4852 {
4853         dout("stop\n");
4854         /*
4855          * Make sure the delayed work has stopped before releasing
4856          * the resources.
4857          *
4858          * cancel_delayed_work_sync() only guarantees that the work
4859          * finishes executing, but the delayed work may re-arm itself
4860          * again after that; flush it here instead.
4861          */
4862         flush_delayed_work(&mdsc->delayed_work);
4863
4864         if (mdsc->mdsmap)
4865                 ceph_mdsmap_destroy(mdsc->mdsmap);
4866         kfree(mdsc->sessions);
4867         ceph_caps_finalize(mdsc);
4868         ceph_pool_perm_destroy(mdsc);
4869 }
4870
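/*
 * Final teardown of the mds client at unmount time, once all sessions
 * are gone.
 */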
4871 void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
4872 {
4873         struct ceph_mds_client *mdsc = fsc->mdsc;
4874         dout("mdsc_destroy %p\n", mdsc);
4875
4876         if (!mdsc)
4877                 return;
4878
4879         /* flush out any connection work with references to us */
4880         ceph_msgr_flush();
4881
4882         ceph_mdsc_stop(mdsc);
4883
4884         ceph_metric_destroy(&mdsc->metric);
4885
4886         fsc->mdsc = NULL;
4887         kfree(mdsc);
4888         dout("mdsc_destroy %p done\n", mdsc);
4889 }
4890
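/*
 * Handle a CEPH_MSG_FS_MAP_USER message: scan the fsmap for the file
 * system named by the mds_namespace mount option, record its fscid with
 * the monitor client and subscribe to the matching mdsmap.  If nothing
 * matches, fail any requests waiting for a map with -ENOENT.
 */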
4891 void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
4892 {
4893         struct ceph_fs_client *fsc = mdsc->fsc;
4894         const char *mds_namespace = fsc->mount_options->mds_namespace;
4895         void *p = msg->front.iov_base;
4896         void *end = p + msg->front.iov_len;
4897         u32 epoch;
4898         u32 map_len;
4899         u32 num_fs;
4900         u32 mount_fscid = (u32)-1;
4901         u8 struct_v, struct_cv;
4902         int err = -EINVAL;
4903
4904         ceph_decode_need(&p, end, sizeof(u32), bad);
4905         epoch = ceph_decode_32(&p);
4906
4907         dout("handle_fsmap epoch %u\n", epoch);
4908
4909         ceph_decode_need(&p, end, 2 + sizeof(u32), bad);
4910         struct_v = ceph_decode_8(&p);
4911         struct_cv = ceph_decode_8(&p);
4912         map_len = ceph_decode_32(&p);
4913
4914         ceph_decode_need(&p, end, sizeof(u32) * 3, bad);
4915         p += sizeof(u32) * 2; /* skip epoch and legacy_client_fscid */
4916
4917         num_fs = ceph_decode_32(&p);
4918         while (num_fs-- > 0) {
4919                 void *info_p, *info_end;
4920                 u32 info_len;
4921                 u8 info_v, info_cv;
4922                 u32 fscid, namelen;
4923
4924                 ceph_decode_need(&p, end, 2 + sizeof(u32), bad);
4925                 info_v = ceph_decode_8(&p);
4926                 info_cv = ceph_decode_8(&p);
4927                 info_len = ceph_decode_32(&p);
4928                 ceph_decode_need(&p, end, info_len, bad);
4929                 info_p = p;
4930                 info_end = p + info_len;
4931                 p = info_end;
4932
4933                 ceph_decode_need(&info_p, info_end, sizeof(u32) * 2, bad);
4934                 fscid = ceph_decode_32(&info_p);
4935                 namelen = ceph_decode_32(&info_p);
4936                 ceph_decode_need(&info_p, info_end, namelen, bad);
4937
4938                 if (mds_namespace &&
4939                     strlen(mds_namespace) == namelen &&
4940                     !strncmp(mds_namespace, (char *)info_p, namelen)) {
4941                         mount_fscid = fscid;
4942                         break;
4943                 }
4944         }
4945
4946         ceph_monc_got_map(&fsc->client->monc, CEPH_SUB_FSMAP, epoch);
4947         if (mount_fscid != (u32)-1) {
4948                 fsc->client->monc.fs_cluster_id = mount_fscid;
4949                 ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
4950                                    0, true);
4951                 ceph_monc_renew_subs(&fsc->client->monc);
4952         } else {
4953                 err = -ENOENT;
4954                 goto err_out;
4955         }
4956         return;
4957
4958 bad:
4959         pr_err("error decoding fsmap\n");
4960 err_out:
4961         mutex_lock(&mdsc->mutex);
4962         mdsc->mdsmap_err = err;
4963         __wake_requests(mdsc, &mdsc->waiting_for_map);
4964         mutex_unlock(&mdsc->mutex);
4965 }
4966
4967 /*
4968  * handle mds map update.
4969  */
4970 void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
4971 {
4972         u32 epoch;
4973         u32 maplen;
4974         void *p = msg->front.iov_base;
4975         void *end = p + msg->front.iov_len;
4976         struct ceph_mdsmap *newmap, *oldmap;
4977         struct ceph_fsid fsid;
4978         int err = -EINVAL;
4979
4980         ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
4981         ceph_decode_copy(&p, &fsid, sizeof(fsid));
4982         if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0)
4983                 return;
4984         epoch = ceph_decode_32(&p);
4985         maplen = ceph_decode_32(&p);
4986         dout("handle_map epoch %u len %d\n", epoch, (int)maplen);
4987
4988         /* do we need it? */
4989         mutex_lock(&mdsc->mutex);
4990         if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
4991                 dout("handle_map epoch %u <= our %u\n",
4992                      epoch, mdsc->mdsmap->m_epoch);
4993                 mutex_unlock(&mdsc->mutex);
4994                 return;
4995         }
4996
4997         newmap = ceph_mdsmap_decode(&p, end);
4998         if (IS_ERR(newmap)) {
4999                 err = PTR_ERR(newmap);
5000                 goto bad_unlock;
5001         }
5002
5003         /* swap into place */
5004         if (mdsc->mdsmap) {
5005                 oldmap = mdsc->mdsmap;
5006                 mdsc->mdsmap = newmap;
5007                 check_new_map(mdsc, newmap, oldmap);
5008                 ceph_mdsmap_destroy(oldmap);
5009         } else {
5010                 mdsc->mdsmap = newmap;  /* first mds map */
5011         }
5012         mdsc->fsc->max_file_size = min((loff_t)mdsc->mdsmap->m_max_file_size,
5013                                         MAX_LFS_FILESIZE);
5014
5015         __wake_requests(mdsc, &mdsc->waiting_for_map);
5016         ceph_monc_got_map(&mdsc->fsc->client->monc, CEPH_SUB_MDSMAP,
5017                           mdsc->mdsmap->m_epoch);
5018
5019         mutex_unlock(&mdsc->mutex);
5020         schedule_delayed(mdsc, 0);
5021         return;
5022
5023 bad_unlock:
5024         mutex_unlock(&mdsc->mutex);
5025 bad:
5026         pr_err("error decoding mdsmap %d\n", err);
5027         return;
5028 }
5029
5030 static struct ceph_connection *con_get(struct ceph_connection *con)
5031 {
5032         struct ceph_mds_session *s = con->private;
5033
5034         if (ceph_get_mds_session(s))
5035                 return con;
5036         return NULL;
5037 }
5038
5039 static void con_put(struct ceph_connection *con)
5040 {
5041         struct ceph_mds_session *s = con->private;
5042
5043         ceph_put_mds_session(s);
5044 }
5045
5046 /*
5047  * if the client is unresponsive for long enough, the mds will kill
5048  * the session entirely.
5049  */
5050 static void peer_reset(struct ceph_connection *con)
5051 {
5052         struct ceph_mds_session *s = con->private;
5053         struct ceph_mds_client *mdsc = s->s_mdsc;
5054
5055         pr_warn("mds%d closed our session\n", s->s_mds);
5056         send_mds_reconnect(mdsc, s);
5057 }
5058
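/*
 * Dispatch an incoming message on an MDS connection, dropping it if the
 * session is no longer registered.
 */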
5059 static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
5060 {
5061         struct ceph_mds_session *s = con->private;
5062         struct ceph_mds_client *mdsc = s->s_mdsc;
5063         int type = le16_to_cpu(msg->hdr.type);
5064
5065         mutex_lock(&mdsc->mutex);
5066         if (__verify_registered_session(mdsc, s) < 0) {
5067                 mutex_unlock(&mdsc->mutex);
5068                 goto out;
5069         }
5070         mutex_unlock(&mdsc->mutex);
5071
5072         switch (type) {
5073         case CEPH_MSG_MDS_MAP:
5074                 ceph_mdsc_handle_mdsmap(mdsc, msg);
5075                 break;
5076         case CEPH_MSG_FS_MAP_USER:
5077                 ceph_mdsc_handle_fsmap(mdsc, msg);
5078                 break;
5079         case CEPH_MSG_CLIENT_SESSION:
5080                 handle_session(s, msg);
5081                 break;
5082         case CEPH_MSG_CLIENT_REPLY:
5083                 handle_reply(s, msg);
5084                 break;
5085         case CEPH_MSG_CLIENT_REQUEST_FORWARD:
5086                 handle_forward(mdsc, s, msg);
5087                 break;
5088         case CEPH_MSG_CLIENT_CAPS:
5089                 ceph_handle_caps(s, msg);
5090                 break;
5091         case CEPH_MSG_CLIENT_SNAP:
5092                 ceph_handle_snap(mdsc, s, msg);
5093                 break;
5094         case CEPH_MSG_CLIENT_LEASE:
5095                 handle_lease(mdsc, s, msg);
5096                 break;
5097         case CEPH_MSG_CLIENT_QUOTA:
5098                 ceph_handle_quota(mdsc, s, msg);
5099                 break;
5100
5101         default:
5102                 pr_err("received unknown message type %d %s\n", type,
5103                        ceph_msg_type_name(type));
5104         }
5105 out:
5106         ceph_msg_put(msg);
5107 }
5108
5109 /*
5110  * authentication
5111  */
5112
5113 /*
5114  * Note: returned pointer is the address of a structure that's
5115  * managed separately.  Caller must *not* attempt to free it.
5116  */
5117 static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
5118                                         int *proto, int force_new)
5119 {
5120         struct ceph_mds_session *s = con->private;
5121         struct ceph_mds_client *mdsc = s->s_mdsc;
5122         struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
5123         struct ceph_auth_handshake *auth = &s->s_auth;
5124
5125         if (force_new && auth->authorizer) {
5126                 ceph_auth_destroy_authorizer(auth->authorizer);
5127                 auth->authorizer = NULL;
5128         }
5129         if (!auth->authorizer) {
5130                 int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
5131                                                       auth);
5132                 if (ret)
5133                         return ERR_PTR(ret);
5134         } else {
5135                 int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
5136                                                       auth);
5137                 if (ret)
5138                         return ERR_PTR(ret);
5139         }
5140         *proto = ac->protocol;
5141
5142         return auth;
5143 }
5144
5145 static int add_authorizer_challenge(struct ceph_connection *con,
5146                                     void *challenge_buf, int challenge_buf_len)
5147 {
5148         struct ceph_mds_session *s = con->private;
5149         struct ceph_mds_client *mdsc = s->s_mdsc;
5150         struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
5151
5152         return ceph_auth_add_authorizer_challenge(ac, s->s_auth.authorizer,
5153                                             challenge_buf, challenge_buf_len);
5154 }
5155
5156 static int verify_authorizer_reply(struct ceph_connection *con)
5157 {
5158         struct ceph_mds_session *s = con->private;
5159         struct ceph_mds_client *mdsc = s->s_mdsc;
5160         struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
5161
5162         return ceph_auth_verify_authorizer_reply(ac, s->s_auth.authorizer);
5163 }
5164
5165 static int invalidate_authorizer(struct ceph_connection *con)
5166 {
5167         struct ceph_mds_session *s = con->private;
5168         struct ceph_mds_client *mdsc = s->s_mdsc;
5169         struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
5170
5171         ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);
5172
5173         return ceph_monc_validate_auth(&mdsc->fsc->client->monc);
5174 }
5175
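/*
 * Allocate a buffer for an incoming MDS message based on its header,
 * unless the connection already has a partially received message
 * (con->in_msg) to reuse.
 */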
5176 static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con,
5177                                 struct ceph_msg_header *hdr, int *skip)
5178 {
5179         struct ceph_msg *msg;
5180         int type = (int) le16_to_cpu(hdr->type);
5181         int front_len = (int) le32_to_cpu(hdr->front_len);
5182
5183         if (con->in_msg)
5184                 return con->in_msg;
5185
5186         *skip = 0;
5187         msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
5188         if (!msg) {
5189                 pr_err("unable to allocate msg type %d len %d\n",
5190                        type, front_len);
5191                 return NULL;
5192         }
5193
5194         return msg;
5195 }
5196
5197 static int mds_sign_message(struct ceph_msg *msg)
5198 {
5199        struct ceph_mds_session *s = msg->con->private;
5200        struct ceph_auth_handshake *auth = &s->s_auth;
5201
5202        return ceph_auth_sign_message(auth, msg);
5203 }
5204
5205 static int mds_check_message_signature(struct ceph_msg *msg)
5206 {
5207        struct ceph_mds_session *s = msg->con->private;
5208        struct ceph_auth_handshake *auth = &s->s_auth;
5209
5210        return ceph_auth_check_message_signature(auth, msg);
5211 }
5212
5213 static const struct ceph_connection_operations mds_con_ops = {
5214         .get = con_get,
5215         .put = con_put,
5216         .dispatch = dispatch,
5217         .get_authorizer = get_authorizer,
5218         .add_authorizer_challenge = add_authorizer_challenge,
5219         .verify_authorizer_reply = verify_authorizer_reply,
5220         .invalidate_authorizer = invalidate_authorizer,
5221         .peer_reset = peer_reset,
5222         .alloc_msg = mds_alloc_msg,
5223         .sign_message = mds_sign_message,
5224         .check_message_signature = mds_check_message_signature,
5225 };
5226
5227 /* eof */