#include <linux/ceph/ceph_debug.h>

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/writeback.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include <linux/ceph/decode.h>
#include <linux/ceph/messenger.h>
/*
 * Capability management
 *
 * The Ceph metadata servers control client access to inode metadata
 * and file data by issuing capabilities, granting clients permission
 * to read and/or write both inode fields and file data to OSDs
 * (storage nodes).  Each capability consists of a set of bits
 * indicating which operations are allowed.
 *
 * If the client holds a *_SHARED cap, the client has a coherent value
 * that can be safely read from the cached inode.
 *
 * In the case of a *_EXCL (exclusive) or FILE_WR capability, the
 * client is allowed to change inode attributes (e.g., file size,
 * mtime), note its dirty state in the ceph_cap, and asynchronously
 * flush that metadata change to the MDS.
 *
 * In the event of a conflicting operation (perhaps by another
 * client), the MDS will revoke the conflicting client capabilities.
 *
 * In order for a client to cache an inode, it must hold a capability
 * with at least one MDS server.  When inodes are released, release
 * notifications are batched and periodically sent en masse to the
 * MDS cluster to release server state.
 */
static u64 __get_oldest_flush_tid(struct ceph_mds_client *mdsc);
static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *session,
				 struct ceph_inode_info *ci,
				 u64 oldest_flush_tid);
/*
 * Generate readable cap strings for debugging output.
 */
#define MAX_CAP_STR 20
static char cap_str[MAX_CAP_STR][40];
static DEFINE_SPINLOCK(cap_str_lock);
static int last_cap_str;
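/*
 * ceph_cap_string() hands back pointers into the static cap_str[]
 * ring buffer above rather than allocating, so its result can be used
 * directly as a dout()/pr_*() argument with no free.  The buffer
 * rotates through MAX_CAP_STR slots, so up to MAX_CAP_STR results can
 * be live in a single log statement; older strings are silently
 * overwritten after that.
 */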
static char *gcap_string(char *s, int c)
{
	if (c & CEPH_CAP_GSHARED)
		*s++ = 's';
	if (c & CEPH_CAP_GEXCL)
		*s++ = 'x';
	if (c & CEPH_CAP_GCACHE)
		*s++ = 'c';
	if (c & CEPH_CAP_GRD)
		*s++ = 'r';
	if (c & CEPH_CAP_GWR)
		*s++ = 'w';
	if (c & CEPH_CAP_GBUFFER)
		*s++ = 'b';
	if (c & CEPH_CAP_GLAZYIO)
		*s++ = 'l';
	return s;
}
const char *ceph_cap_string(int caps)
{
	int i;
	char *s;
	int c;

	spin_lock(&cap_str_lock);
	i = last_cap_str++;
	if (last_cap_str == MAX_CAP_STR)
		last_cap_str = 0;
	spin_unlock(&cap_str_lock);

	s = cap_str[i];

	if (caps & CEPH_CAP_PIN)
		*s++ = 'p';

	c = (caps >> CEPH_CAP_SAUTH) & 3;
	if (c) {
		*s++ = 'A';
		s = gcap_string(s, c);
	}

	c = (caps >> CEPH_CAP_SLINK) & 3;
	if (c) {
		*s++ = 'L';
		s = gcap_string(s, c);
	}

	c = (caps >> CEPH_CAP_SXATTR) & 3;
	if (c) {
		*s++ = 'X';
		s = gcap_string(s, c);
	}

	c = caps >> CEPH_CAP_SFILE;
	if (c) {
		*s++ = 'F';
		s = gcap_string(s, c);
	}

	if (s == cap_str[i])
		*s++ = '-';
	*s = 0;
	return cap_str[i];
}
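/*
 * An illustration of the encoding (not a snippet from elsewhere in
 * this file): a caps value of
 * CEPH_CAP_PIN | CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_RD renders as
 * "pFsr" -- 'p' for the pin, 'F' introducing the FILE group, then the
 * per-group 's' (shared) and 'r' (read) bits from gcap_string().
 */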
void ceph_caps_init(struct ceph_mds_client *mdsc)
{
	INIT_LIST_HEAD(&mdsc->caps_list);
	spin_lock_init(&mdsc->caps_list_lock);
}
void ceph_caps_finalize(struct ceph_mds_client *mdsc)
{
	struct ceph_cap *cap;

	spin_lock(&mdsc->caps_list_lock);
	while (!list_empty(&mdsc->caps_list)) {
		cap = list_first_entry(&mdsc->caps_list,
				       struct ceph_cap, caps_item);
		list_del(&cap->caps_item);
		kmem_cache_free(ceph_cap_cachep, cap);
	}
	mdsc->caps_total_count = 0;
	mdsc->caps_avail_count = 0;
	mdsc->caps_use_count = 0;
	mdsc->caps_reserve_count = 0;
	mdsc->caps_min_count = 0;
	spin_unlock(&mdsc->caps_list_lock);
}
void ceph_adjust_min_caps(struct ceph_mds_client *mdsc, int delta)
{
	spin_lock(&mdsc->caps_list_lock);
	mdsc->caps_min_count += delta;
	BUG_ON(mdsc->caps_min_count < 0);
	spin_unlock(&mdsc->caps_list_lock);
}
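/*
 * Every cap owned by this pool is in exactly one of three states: in
 * use, reserved for a pending MDS request, or sitting unused on
 * caps_list.  The BUG_ON()s below all assert the same invariant under
 * caps_list_lock:
 *
 *	caps_total_count == caps_use_count + caps_reserve_count +
 *			    caps_avail_count
 */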
void ceph_reserve_caps(struct ceph_mds_client *mdsc,
		       struct ceph_cap_reservation *ctx, int need)
{
	int i;
	struct ceph_cap *cap;
	int have;
	int alloc = 0;
	LIST_HEAD(newcaps);

	dout("reserve caps ctx=%p need=%d\n", ctx, need);

	/* first reserve any caps that are already allocated */
	spin_lock(&mdsc->caps_list_lock);
	if (mdsc->caps_avail_count >= need)
		have = need;
	else
		have = mdsc->caps_avail_count;
	mdsc->caps_avail_count -= have;
	mdsc->caps_reserve_count += have;
	BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
					 mdsc->caps_reserve_count +
					 mdsc->caps_avail_count);
	spin_unlock(&mdsc->caps_list_lock);

	for (i = have; i < need; i++) {
		cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
		if (!cap)
			break;
		list_add(&cap->caps_item, &newcaps);
		alloc++;
	}
	/* we didn't manage to reserve as much as we needed */
	if (have + alloc != need)
		pr_warn("reserve caps ctx=%p ENOMEM need=%d got=%d\n",
			ctx, need, have + alloc);

	spin_lock(&mdsc->caps_list_lock);
	mdsc->caps_total_count += alloc;
	mdsc->caps_reserve_count += alloc;
	list_splice(&newcaps, &mdsc->caps_list);

	BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
					 mdsc->caps_reserve_count +
					 mdsc->caps_avail_count);
	spin_unlock(&mdsc->caps_list_lock);

	ctx->count = need;
	dout("reserve caps ctx=%p %d = %d used + %d resv + %d avail\n",
	     ctx, mdsc->caps_total_count, mdsc->caps_use_count,
	     mdsc->caps_reserve_count, mdsc->caps_avail_count);
}
int ceph_unreserve_caps(struct ceph_mds_client *mdsc,
			struct ceph_cap_reservation *ctx)
{
	dout("unreserve caps ctx=%p count=%d\n", ctx, ctx->count);
	if (ctx->count) {
		spin_lock(&mdsc->caps_list_lock);
		BUG_ON(mdsc->caps_reserve_count < ctx->count);
		mdsc->caps_reserve_count -= ctx->count;
		mdsc->caps_avail_count += ctx->count;
		ctx->count = 0;
		dout("unreserve caps %d = %d used + %d resv + %d avail\n",
		     mdsc->caps_total_count, mdsc->caps_use_count,
		     mdsc->caps_reserve_count, mdsc->caps_avail_count);
		BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
						 mdsc->caps_reserve_count +
						 mdsc->caps_avail_count);
		spin_unlock(&mdsc->caps_list_lock);
	}
	return 0;
}
struct ceph_cap *ceph_get_cap(struct ceph_mds_client *mdsc,
			      struct ceph_cap_reservation *ctx)
{
	struct ceph_cap *cap = NULL;

	/* temporary, until we do something about cap import/export */
	if (!ctx) {
		cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
		if (cap) {
			spin_lock(&mdsc->caps_list_lock);
			mdsc->caps_use_count++;
			mdsc->caps_total_count++;
			spin_unlock(&mdsc->caps_list_lock);
		}
		return cap;
	}

	spin_lock(&mdsc->caps_list_lock);
	dout("get_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n",
	     ctx, ctx->count, mdsc->caps_total_count, mdsc->caps_use_count,
	     mdsc->caps_reserve_count, mdsc->caps_avail_count);
	BUG_ON(!ctx->count);
	BUG_ON(ctx->count > mdsc->caps_reserve_count);
	BUG_ON(list_empty(&mdsc->caps_list));

	ctx->count--;
	mdsc->caps_reserve_count--;
	mdsc->caps_use_count++;

	cap = list_first_entry(&mdsc->caps_list, struct ceph_cap, caps_item);
	list_del(&cap->caps_item);

	BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
	       mdsc->caps_reserve_count + mdsc->caps_avail_count);
	spin_unlock(&mdsc->caps_list_lock);
	return cap;
}
void ceph_put_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap)
{
	spin_lock(&mdsc->caps_list_lock);
	dout("put_cap %p %d = %d used + %d resv + %d avail\n",
	     cap, mdsc->caps_total_count, mdsc->caps_use_count,
	     mdsc->caps_reserve_count, mdsc->caps_avail_count);
	mdsc->caps_use_count--;
	/*
	 * Keep some preallocated caps around (ceph_min_count), to
	 * avoid lots of free/alloc churn.
	 */
	if (mdsc->caps_avail_count >= mdsc->caps_reserve_count +
				      mdsc->caps_min_count) {
		mdsc->caps_total_count--;
		kmem_cache_free(ceph_cap_cachep, cap);
	} else {
		mdsc->caps_avail_count++;
		list_add(&cap->caps_item, &mdsc->caps_list);
	}

	BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
	       mdsc->caps_reserve_count + mdsc->caps_avail_count);
	spin_unlock(&mdsc->caps_list_lock);
}
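/*
 * Typical reservation lifecycle, as a sketch (the real callers live
 * in mds_client.c; error handling omitted):
 *
 *	struct ceph_cap_reservation ctx = {};
 *
 *	ceph_reserve_caps(mdsc, &ctx, need);	// before committing
 *	...
 *	cap = ceph_get_cap(mdsc, &ctx);		// consume a reserved cap
 *	...
 *	ceph_unreserve_caps(mdsc, &ctx);	// return what's unused
 *
 * Reserving up front means ceph_get_cap() cannot fail at the point
 * where we are already committed to instantiating a cap.
 */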
void ceph_reservation_status(struct ceph_fs_client *fsc,
			     int *total, int *avail, int *used, int *reserved,
			     int *min)
{
	struct ceph_mds_client *mdsc = fsc->mdsc;

	if (total)
		*total = mdsc->caps_total_count;
	if (avail)
		*avail = mdsc->caps_avail_count;
	if (used)
		*used = mdsc->caps_use_count;
	if (reserved)
		*reserved = mdsc->caps_reserve_count;
	if (min)
		*min = mdsc->caps_min_count;
}
/*
 * Find ceph_cap for given mds, if any.
 *
 * Called with i_ceph_lock held.
 */
static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
{
	struct ceph_cap *cap;
	struct rb_node *n = ci->i_caps.rb_node;

	while (n) {
		cap = rb_entry(n, struct ceph_cap, ci_node);
		if (mds < cap->mds)
			n = n->rb_left;
		else if (mds > cap->mds)
			n = n->rb_right;
		else
			return cap;
	}
	return NULL;
}
struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci, int mds)
{
	struct ceph_cap *cap;

	spin_lock(&ci->i_ceph_lock);
	cap = __get_cap_for_mds(ci, mds);
	spin_unlock(&ci->i_ceph_lock);
	return cap;
}
/*
 * Return id of any MDS with a cap, preferably FILE_WR|BUFFER|EXCL, else -1.
 */
static int __ceph_get_cap_mds(struct ceph_inode_info *ci)
{
	struct ceph_cap *cap;
	int mds = -1;
	struct rb_node *p;

	/* prefer mds with WR|BUFFER|EXCL caps */
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		mds = cap->mds;
		if (cap->issued & (CEPH_CAP_FILE_WR |
				   CEPH_CAP_FILE_BUFFER |
				   CEPH_CAP_FILE_EXCL))
			break;
	}
	return mds;
}
int ceph_get_cap_mds(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int mds;

	spin_lock(&ci->i_ceph_lock);
	mds = __ceph_get_cap_mds(ceph_inode(inode));
	spin_unlock(&ci->i_ceph_lock);
	return mds;
}
/*
 * Called under i_ceph_lock.
 */
static void __insert_cap_node(struct ceph_inode_info *ci,
			      struct ceph_cap *new)
{
	struct rb_node **p = &ci->i_caps.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_cap *cap = NULL;

	while (*p) {
		parent = *p;
		cap = rb_entry(parent, struct ceph_cap, ci_node);
		if (new->mds < cap->mds)
			p = &(*p)->rb_left;
		else if (new->mds > cap->mds)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->ci_node, parent, p);
	rb_insert_color(&new->ci_node, &ci->i_caps);
}
/*
 * (re)set cap hold timeouts, which control the delayed release
 * of unused caps back to the MDS.  Should be called on cap use.
 */
static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
			       struct ceph_inode_info *ci)
{
	struct ceph_mount_options *ma = mdsc->fsc->mount_options;

	ci->i_hold_caps_min = round_jiffies(jiffies +
					    ma->caps_wanted_delay_min * HZ);
	ci->i_hold_caps_max = round_jiffies(jiffies +
					    ma->caps_wanted_delay_max * HZ);
	dout("__cap_set_timeouts %p min %lu max %lu\n", &ci->vfs_inode,
	     ci->i_hold_caps_min - jiffies, ci->i_hold_caps_max - jiffies);
}
/*
 * (Re)queue cap at the end of the delayed cap release list.
 *
 * If I_FLUSH is set, leave the inode at the front of the list.
 *
 * Caller holds i_ceph_lock
 *    -> we take mdsc->cap_delay_lock
 */
static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
				struct ceph_inode_info *ci)
{
	__cap_set_timeouts(mdsc, ci);
	dout("__cap_delay_requeue %p flags %d at %lu\n", &ci->vfs_inode,
	     ci->i_ceph_flags, ci->i_hold_caps_max);
	if (!mdsc->stopping) {
		spin_lock(&mdsc->cap_delay_lock);
		if (!list_empty(&ci->i_cap_delay_list)) {
			if (ci->i_ceph_flags & CEPH_I_FLUSH)
				goto no_change;
			list_del_init(&ci->i_cap_delay_list);
		}
		list_add_tail(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
no_change:
		spin_unlock(&mdsc->cap_delay_lock);
	}
}
/*
 * Queue an inode for immediate writeback.  Mark inode with I_FLUSH,
 * indicating we should send a cap message to flush dirty metadata
 * asap, and move to the front of the delayed cap list.
 */
static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
				      struct ceph_inode_info *ci)
{
	dout("__cap_delay_requeue_front %p\n", &ci->vfs_inode);
	spin_lock(&mdsc->cap_delay_lock);
	ci->i_ceph_flags |= CEPH_I_FLUSH;
	if (!list_empty(&ci->i_cap_delay_list))
		list_del_init(&ci->i_cap_delay_list);
	list_add(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
	spin_unlock(&mdsc->cap_delay_lock);
}
/*
 * Cancel delayed work on cap.
 *
 * Caller must hold i_ceph_lock.
 */
static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
			       struct ceph_inode_info *ci)
{
	dout("__cap_delay_cancel %p\n", &ci->vfs_inode);
	if (list_empty(&ci->i_cap_delay_list))
		return;
	spin_lock(&mdsc->cap_delay_lock);
	list_del_init(&ci->i_cap_delay_list);
	spin_unlock(&mdsc->cap_delay_lock);
}
/*
 * Common issue checks for add_cap, handle_cap_grant.
 */
static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
			      unsigned issued)
{
	unsigned had = __ceph_caps_issued(ci, NULL);

	/*
	 * Each time we receive FILE_CACHE anew, we increment
	 * i_rdcache_gen.
	 */
	if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
	    (had & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0) {
		ci->i_rdcache_gen++;
	}

	/*
	 * If we are newly issued FILE_SHARED, mark dir not complete; we
	 * don't know what happened to this directory while we didn't
	 * have the cap.
	 */
	if ((issued & CEPH_CAP_FILE_SHARED) &&
	    (had & CEPH_CAP_FILE_SHARED) == 0) {
		ci->i_shared_gen++;
		if (S_ISDIR(ci->vfs_inode.i_mode)) {
			dout(" marking %p NOT complete\n", &ci->vfs_inode);
			__ceph_dir_clear_complete(ci);
		}
	}
}
/*
 * Add a capability under the given MDS session.
 *
 * Caller should hold session snap_rwsem (read) and s_mutex.
 *
 * @fmode is the open file mode, if we are opening a file, otherwise
 * it is < 0.  (This is so we can atomically add the cap and add an
 * open file reference to it.)
 */
void ceph_add_cap(struct inode *inode,
		  struct ceph_mds_session *session, u64 cap_id,
		  int fmode, unsigned issued, unsigned wanted,
		  unsigned seq, unsigned mseq, u64 realmino, int flags,
		  struct ceph_cap **new_cap)
{
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_cap *cap;
	int mds = session->s_mds;
	int actual_wanted;

	dout("add_cap %p mds%d cap %llx %s seq %d\n", inode,
	     session->s_mds, cap_id, ceph_cap_string(issued), seq);

	/*
	 * If we are opening the file, include file mode wanted bits
	 * in wanted.
	 */
	if (fmode >= 0)
		wanted |= ceph_caps_for_mode(fmode);

	cap = __get_cap_for_mds(ci, mds);
	if (!cap) {
		cap = *new_cap;
		*new_cap = NULL;

		cap->issued = 0;
		cap->implemented = 0;
		cap->mds = mds;
		cap->mds_wanted = 0;
		cap->mseq = 0;

		cap->ci = ci;
		__insert_cap_node(ci, cap);

		/* add to session cap list */
		cap->session = session;
		spin_lock(&session->s_cap_lock);
		list_add_tail(&cap->session_caps, &session->s_caps);
		session->s_nr_caps++;
		spin_unlock(&session->s_cap_lock);
	} else {
		/*
		 * auth mds of the inode changed. we received the cap export
		 * message, but still haven't received the cap import message.
		 * handle_cap_export() updated the new auth MDS' cap.
		 *
		 * "ceph_seq_cmp(seq, cap->seq) <= 0" means we are processing
		 * a message that was sent before the cap import message. So
		 * don't remove caps.
		 */
		if (ceph_seq_cmp(seq, cap->seq) <= 0) {
			WARN_ON(cap != ci->i_auth_cap);
			WARN_ON(cap->cap_id != cap_id);
			seq = cap->seq;
			mseq = cap->mseq;
			issued |= cap->issued;
			flags |= CEPH_CAP_FLAG_AUTH;
		}
	}

	if (!ci->i_snap_realm) {
		/*
		 * add this inode to the appropriate snap realm
		 */
		struct ceph_snap_realm *realm = ceph_lookup_snap_realm(mdsc,
							       realmino);
		if (realm) {
			spin_lock(&realm->inodes_with_caps_lock);
			ci->i_snap_realm = realm;
			list_add(&ci->i_snap_realm_item,
				 &realm->inodes_with_caps);
			spin_unlock(&realm->inodes_with_caps_lock);
		} else {
			pr_err("ceph_add_cap: couldn't find snap realm %llx\n",
			       realmino);
			WARN_ON(!realm);
		}
	}

	__check_cap_issue(ci, cap, issued);

	/*
	 * If we are issued caps we don't want, or the mds' wanted
	 * value appears to be off, queue a check so we'll release
	 * later and/or update the mds wanted value.
	 */
	actual_wanted = __ceph_caps_wanted(ci);
	if ((wanted & ~actual_wanted) ||
	    (issued & ~actual_wanted & CEPH_CAP_ANY_WR)) {
		dout(" issued %s, mds wanted %s, actual %s, queueing\n",
		     ceph_cap_string(issued), ceph_cap_string(wanted),
		     ceph_cap_string(actual_wanted));
		__cap_delay_requeue(mdsc, ci);
	}

	if (flags & CEPH_CAP_FLAG_AUTH) {
		if (ci->i_auth_cap == NULL ||
		    ceph_seq_cmp(ci->i_auth_cap->mseq, mseq) < 0) {
			ci->i_auth_cap = cap;
			cap->mds_wanted = wanted;
		}
	} else {
		WARN_ON(ci->i_auth_cap == cap);
	}

	dout("add_cap inode %p (%llx.%llx) cap %p %s now %s seq %d mds%d\n",
	     inode, ceph_vinop(inode), cap, ceph_cap_string(issued),
	     ceph_cap_string(issued|cap->issued), seq, mds);
	cap->cap_id = cap_id;
	cap->issued = issued;
	cap->implemented |= issued;
	if (ceph_seq_cmp(mseq, cap->mseq) > 0)
		cap->mds_wanted = wanted;
	else
		cap->mds_wanted |= wanted;
	cap->seq = seq;
	cap->issue_seq = seq;
	cap->mseq = mseq;
	cap->cap_gen = session->s_cap_gen;

	if (fmode >= 0)
		__ceph_get_fmode(ci, fmode);
}
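/*
 * A note on the three sequence numbers updated above: cap->seq
 * advances with each cap message from the MDS, cap->issue_seq records
 * the seq at which the current caps were issued, and cap->mseq counts
 * cap migrations between MDSs -- which is why a strictly newer mseq
 * may overwrite mds_wanted outright, while an equal or older one only
 * adds to it.
 */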
/*
 * Return true if cap has not timed out and belongs to the current
 * generation of the MDS session (i.e. has not gone 'stale' due to
 * us losing touch with the mds).
 */
static int __cap_is_valid(struct ceph_cap *cap)
{
	unsigned long ttl;
	u32 gen;

	spin_lock(&cap->session->s_gen_ttl_lock);
	gen = cap->session->s_cap_gen;
	ttl = cap->session->s_cap_ttl;
	spin_unlock(&cap->session->s_gen_ttl_lock);

	if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) {
		dout("__cap_is_valid %p cap %p issued %s "
		     "but STALE (gen %u vs %u)\n", &cap->ci->vfs_inode,
		     cap, ceph_cap_string(cap->issued), cap->cap_gen, gen);
		return 0;
	}

	return 1;
}
/*
 * Return set of valid cap bits issued to us.  Note that caps time
 * out, and may be invalidated in bulk if the client session times out
 * and session->s_cap_gen is bumped.
 */
int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented)
{
	int have = ci->i_snap_caps;
	struct ceph_cap *cap;
	struct rb_node *p;

	if (implemented)
		*implemented = 0;
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (!__cap_is_valid(cap))
			continue;
		dout("__ceph_caps_issued %p cap %p issued %s\n",
		     &ci->vfs_inode, cap, ceph_cap_string(cap->issued));
		have |= cap->issued;
		if (implemented)
			*implemented |= cap->implemented;
	}
	/*
	 * exclude caps issued by non-auth MDS, but are being revoked
	 * by the auth MDS. The non-auth MDS should be revoking/exporting
	 * these caps, but the message is delayed.
	 */
	if (ci->i_auth_cap) {
		cap = ci->i_auth_cap;
		have &= ~cap->implemented | cap->issued;
	}
	return have;
}
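/*
 * Example of the auth-cap masking above: if the auth cap has
 * implemented Fsc but issued only Fs, then Fc is being revoked by the
 * auth MDS and is dropped from the result here, even if a non-auth
 * cap still lists Fc as issued (its revoke/export message is simply
 * still in flight).
 */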
/*
 * Get cap bits issued by caps other than @ocap
 */
int __ceph_caps_issued_other(struct ceph_inode_info *ci, struct ceph_cap *ocap)
{
	int have = ci->i_snap_caps;
	struct ceph_cap *cap;
	struct rb_node *p;

	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (cap == ocap)
			continue;
		if (!__cap_is_valid(cap))
			continue;
		have |= cap->issued;
	}
	return have;
}
/*
 * Move a cap to the end of the LRU (oldest caps at list head, newest
 * at list tail).
 */
static void __touch_cap(struct ceph_cap *cap)
{
	struct ceph_mds_session *s = cap->session;

	spin_lock(&s->s_cap_lock);
	if (s->s_cap_iterator == NULL) {
		dout("__touch_cap %p cap %p mds%d\n", &cap->ci->vfs_inode, cap,
		     s->s_mds);
		list_move_tail(&cap->session_caps, &s->s_caps);
	} else {
		dout("__touch_cap %p cap %p mds%d NOP, iterating over caps\n",
		     &cap->ci->vfs_inode, cap, s->s_mds);
	}
	spin_unlock(&s->s_cap_lock);
}
/*
 * Check if we hold the given mask.  If so, move the cap(s) to the
 * front of their respective LRUs.  (This is the preferred way for
 * callers to check for caps they want.)
 */
int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
{
	struct ceph_cap *cap;
	struct rb_node *p;
	int have = ci->i_snap_caps;

	if ((have & mask) == mask) {
		dout("__ceph_caps_issued_mask %p snap issued %s"
		     " (mask %s)\n", &ci->vfs_inode,
		     ceph_cap_string(have),
		     ceph_cap_string(mask));
		return 1;
	}

	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (!__cap_is_valid(cap))
			continue;
		if ((cap->issued & mask) == mask) {
			dout("__ceph_caps_issued_mask %p cap %p issued %s"
			     " (mask %s)\n", &ci->vfs_inode, cap,
			     ceph_cap_string(cap->issued),
			     ceph_cap_string(mask));
			if (touch)
				__touch_cap(cap);
			return 1;
		}

		/* does a combination of caps satisfy mask? */
		have |= cap->issued;
		if ((have & mask) == mask) {
			dout("__ceph_caps_issued_mask %p combo issued %s"
			     " (mask %s)\n", &ci->vfs_inode,
			     ceph_cap_string(cap->issued),
			     ceph_cap_string(mask));
			if (touch) {
				struct rb_node *q;

				/* touch this + preceding caps */
				__touch_cap(cap);
				for (q = rb_first(&ci->i_caps); q != p;
				     q = rb_next(q)) {
					cap = rb_entry(q, struct ceph_cap,
						       ci_node);
					if (!__cap_is_valid(cap))
						continue;
					__touch_cap(cap);
				}
			}
			return 1;
		}
	}

	return 0;
}
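/*
 * Note that in the combination case above we touch not just the cap
 * that completed the mask but every preceding (valid) cap as well:
 * all of them contributed bits to the combined mask, so all of them
 * must be kept away from the head of the session cap LRU for the
 * mask to stay satisfiable.
 */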
/*
 * Return true if mask caps are currently being revoked by an MDS.
 */
int __ceph_caps_revoking_other(struct ceph_inode_info *ci,
			       struct ceph_cap *ocap, int mask)
{
	struct ceph_cap *cap;
	struct rb_node *p;

	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (cap != ocap &&
		    (cap->implemented & ~cap->issued & mask))
			return 1;
	}
	return 0;
}
int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
{
	struct inode *inode = &ci->vfs_inode;
	int ret;

	spin_lock(&ci->i_ceph_lock);
	ret = __ceph_caps_revoking_other(ci, NULL, mask);
	spin_unlock(&ci->i_ceph_lock);
	dout("ceph_caps_revoking %p %s = %d\n", inode,
	     ceph_cap_string(mask), ret);
	return ret;
}
int __ceph_caps_used(struct ceph_inode_info *ci)
{
	int used = 0;
	if (ci->i_pin_ref)
		used |= CEPH_CAP_PIN;
	if (ci->i_rd_ref)
		used |= CEPH_CAP_FILE_RD;
	if (ci->i_rdcache_ref ||
	    (!S_ISDIR(ci->vfs_inode.i_mode) && /* ignore readdir cache */
	     ci->vfs_inode.i_data.nrpages))
		used |= CEPH_CAP_FILE_CACHE;
	if (ci->i_wr_ref)
		used |= CEPH_CAP_FILE_WR;
	if (ci->i_wb_ref || ci->i_wrbuffer_ref)
		used |= CEPH_CAP_FILE_BUFFER;
	return used;
}
/*
 * wanted, by virtue of open file modes
 */
int __ceph_caps_file_wanted(struct ceph_inode_info *ci)
{
	int i, bits = 0;
	for (i = 0; i < CEPH_FILE_MODE_BITS; i++) {
		if (ci->i_nr_by_mode[i])
			bits |= 1 << i;
	}
	if (bits == 0)
		return 0;
	return ceph_caps_for_mode(bits >> 1);
}
/*
 * Return caps we have registered with the MDS(s) as 'wanted'.
 */
int __ceph_caps_mds_wanted(struct ceph_inode_info *ci)
{
	struct ceph_cap *cap;
	struct rb_node *p;
	int mds_wanted = 0;

	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (!__cap_is_valid(cap))
			continue;
		if (cap == ci->i_auth_cap)
			mds_wanted |= cap->mds_wanted;
		else
			mds_wanted |= (cap->mds_wanted & ~CEPH_CAP_ANY_FILE_WR);
	}
	return mds_wanted;
}
/*
 * called under i_ceph_lock
 */
static int __ceph_is_any_caps(struct ceph_inode_info *ci)
{
	return !RB_EMPTY_ROOT(&ci->i_caps);
}

int ceph_is_any_caps(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	spin_lock(&ci->i_ceph_lock);
	ret = __ceph_is_any_caps(ci);
	spin_unlock(&ci->i_ceph_lock);

	return ret;
}
static void drop_inode_snap_realm(struct ceph_inode_info *ci)
{
	struct ceph_snap_realm *realm = ci->i_snap_realm;
	spin_lock(&realm->inodes_with_caps_lock);
	list_del_init(&ci->i_snap_realm_item);
	ci->i_snap_realm_counter++;
	ci->i_snap_realm = NULL;
	spin_unlock(&realm->inodes_with_caps_lock);
	ceph_put_snap_realm(ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc,
			    realm);
}
/*
 * Remove a cap.  Take steps to deal with a racing iterate_session_caps.
 *
 * caller should hold i_ceph_lock.
 * caller will not hold session s_mutex if called from destroy_inode.
 */
void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
{
	struct ceph_mds_session *session = cap->session;
	struct ceph_inode_info *ci = cap->ci;
	struct ceph_mds_client *mdsc;
	int removed = 0;

	/* 'ci' being NULL means the remove has already occurred */
	if (!ci) {
		dout("%s: cap inode is NULL\n", __func__);
		return;
	}

	dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);

	mdsc = ceph_inode_to_client(&ci->vfs_inode)->mdsc;

	/* remove from inode's cap rbtree, and clear auth cap */
	rb_erase(&cap->ci_node, &ci->i_caps);
	if (ci->i_auth_cap == cap)
		ci->i_auth_cap = NULL;

	/* remove from session list */
	spin_lock(&session->s_cap_lock);
	if (session->s_cap_iterator == cap) {
		/* not yet, we are iterating over this very cap */
		dout("__ceph_remove_cap delaying %p removal from session %p\n",
		     cap, cap->session);
	} else {
		list_del_init(&cap->session_caps);
		session->s_nr_caps--;
		cap->session = NULL;
		removed = 1;
	}
	/* protect backpointer with s_cap_lock: see iterate_session_caps */
	cap->ci = NULL;

	/*
	 * s_cap_reconnect is protected by s_cap_lock. no one changes
	 * s_cap_gen while session is in the reconnect state.
	 */
	if (queue_release &&
	    (!session->s_cap_reconnect || cap->cap_gen == session->s_cap_gen)) {
		cap->queue_release = 1;
		if (removed) {
			list_add_tail(&cap->session_caps,
				      &session->s_cap_releases);
			session->s_num_cap_releases++;
			removed = 0;
		}
	} else {
		cap->queue_release = 0;
	}
	cap->cap_ino = ci->i_vino.ino;

	spin_unlock(&session->s_cap_lock);

	if (removed)
		ceph_put_cap(mdsc, cap);

	/* when reconnect denied, we remove session caps forcibly,
	 * i_wr_ref can be non-zero. If there are ongoing writes,
	 * keep i_snap_realm.
	 */
	if (!__ceph_is_any_caps(ci) && ci->i_wr_ref == 0 && ci->i_snap_realm)
		drop_inode_snap_realm(ci);

	if (!__ceph_is_any_real_caps(ci))
		__cap_delay_cancel(mdsc, ci);
}
/*
 * Build and send a cap message to the given MDS.
 *
 * Caller should be holding s_mutex.
 */
static int send_cap_msg(struct ceph_mds_session *session,
			u64 ino, u64 cid, int op,
			int caps, int wanted, int dirty,
			u32 seq, u64 flush_tid, u64 oldest_flush_tid,
			u32 issue_seq, u32 mseq, u64 size, u64 max_size,
			struct timespec *mtime, struct timespec *atime,
			struct timespec *ctime, u32 time_warp_seq,
			kuid_t uid, kgid_t gid, umode_t mode,
			u64 xattr_version,
			struct ceph_buffer *xattrs_buf,
			u64 follows, bool inline_data)
{
	struct ceph_mds_caps *fc;
	struct ceph_msg *msg;
	void *p;
	size_t extra_len;

	dout("send_cap_msg %s %llx %llx caps %s wanted %s dirty %s"
	     " seq %u/%u tid %llu/%llu mseq %u follows %lld size %llu/%llu"
	     " xattr_ver %llu xattr_len %d\n", ceph_cap_op_name(op),
	     cid, ino, ceph_cap_string(caps), ceph_cap_string(wanted),
	     ceph_cap_string(dirty),
	     seq, issue_seq, flush_tid, oldest_flush_tid,
	     mseq, follows, size, max_size,
	     xattr_version, xattrs_buf ? (int)xattrs_buf->vec.iov_len : 0);

	/* flock buffer size + inline version + inline data size +
	 * osd_epoch_barrier + oldest_flush_tid */
	extra_len = 4 + 8 + 4 + 4 + 8;
	msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, sizeof(*fc) + extra_len,
			   GFP_NOFS, false);
	if (!msg)
		return -ENOMEM;

	msg->hdr.version = cpu_to_le16(6);
	msg->hdr.tid = cpu_to_le64(flush_tid);

	fc = msg->front.iov_base;
	memset(fc, 0, sizeof(*fc));

	fc->cap_id = cpu_to_le64(cid);
	fc->op = cpu_to_le32(op);
	fc->seq = cpu_to_le32(seq);
	fc->issue_seq = cpu_to_le32(issue_seq);
	fc->migrate_seq = cpu_to_le32(mseq);
	fc->caps = cpu_to_le32(caps);
	fc->wanted = cpu_to_le32(wanted);
	fc->dirty = cpu_to_le32(dirty);
	fc->ino = cpu_to_le64(ino);
	fc->snap_follows = cpu_to_le64(follows);

	fc->size = cpu_to_le64(size);
	fc->max_size = cpu_to_le64(max_size);
	if (mtime)
		ceph_encode_timespec(&fc->mtime, mtime);
	if (atime)
		ceph_encode_timespec(&fc->atime, atime);
	if (ctime)
		ceph_encode_timespec(&fc->ctime, ctime);
	fc->time_warp_seq = cpu_to_le32(time_warp_seq);

	fc->uid = cpu_to_le32(from_kuid(&init_user_ns, uid));
	fc->gid = cpu_to_le32(from_kgid(&init_user_ns, gid));
	fc->mode = cpu_to_le32(mode);

	p = fc + 1;
	/* flock buffer size */
	ceph_encode_32(&p, 0);
	/* inline version */
	ceph_encode_64(&p, inline_data ? 0 : CEPH_INLINE_NONE);
	/* inline data size */
	ceph_encode_32(&p, 0);
	/* osd_epoch_barrier */
	ceph_encode_32(&p, 0);
	/* oldest_flush_tid */
	ceph_encode_64(&p, oldest_flush_tid);

	fc->xattr_version = cpu_to_le64(xattr_version);
	if (xattrs_buf) {
		msg->middle = ceph_buffer_get(xattrs_buf);
		fc->xattr_len = cpu_to_le32(xattrs_buf->vec.iov_len);
		msg->hdr.middle_len = cpu_to_le32(xattrs_buf->vec.iov_len);
	}

	ceph_con_send(&session->s_con, msg);
	return 0;
}
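/*
 * Layout of the encoded message, for reference (this matches the
 * fields written above for hdr.version == 6): the fixed struct
 * ceph_mds_caps, followed by
 *
 *	u32 flock buffer size		(always 0 here)
 *	u64 inline version
 *	u32 inline data size		(always 0 here)
 *	u32 osd_epoch_barrier		(always 0 here)
 *	u64 oldest_flush_tid
 *
 * with any xattr blob carried in the message middle.
 */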
/*
 * Queue cap releases when an inode is dropped from our cache.
 */
void ceph_queue_caps_release(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct rb_node *p;

	/* lock i_ceph_lock, because ceph_d_revalidate(..., LOOKUP_RCU)
	 * may call __ceph_caps_issued_mask() on a freeing inode. */
	spin_lock(&ci->i_ceph_lock);
	p = rb_first(&ci->i_caps);
	while (p) {
		struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
		p = rb_next(p);
		__ceph_remove_cap(cap, true);
	}
	spin_unlock(&ci->i_ceph_lock);
}
/*
 * Send a cap msg on the given inode.  Update our caps state, then
 * drop i_ceph_lock and send the message.
 *
 * Make note of max_size reported/requested from mds, revoked caps
 * that have now been implemented.
 *
 * Make half-hearted attempt to invalidate page cache if we are
 * dropping RDCACHE.  Note that this will leave behind locked pages
 * that we'll then need to deal with elsewhere.
 *
 * Return non-zero if delayed release, or we experienced an error
 * such that the caller should requeue + retry later.
 *
 * called with i_ceph_lock, then drops it.
 * caller should hold snap_rwsem (read), s_mutex.
 */
static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
		      int op, int used, int want, int retain, int flushing,
		      u64 flush_tid, u64 oldest_flush_tid)
	__releases(cap->ci->i_ceph_lock)
{
	struct ceph_inode_info *ci = cap->ci;
	struct inode *inode = &ci->vfs_inode;
	u64 cap_id = cap->cap_id;
	int held, revoking, dropping, keep;
	u64 follows, size, max_size;
	u32 seq, issue_seq, mseq, time_warp_seq;
	struct timespec mtime, atime, ctime;
	int wake = 0;
	umode_t mode;
	kuid_t uid;
	kgid_t gid;
	struct ceph_mds_session *session;
	u64 xattr_version = 0;
	struct ceph_buffer *xattr_blob = NULL;
	int delayed = 0;
	int ret;
	bool inline_data;

	held = cap->issued | cap->implemented;
	revoking = cap->implemented & ~cap->issued;
	retain &= ~revoking;
	dropping = cap->issued & ~retain;

	dout("__send_cap %p cap %p session %p %s -> %s (revoking %s)\n",
	     inode, cap, cap->session,
	     ceph_cap_string(held), ceph_cap_string(held & retain),
	     ceph_cap_string(revoking));
	BUG_ON((retain & CEPH_CAP_PIN) == 0);

	session = cap->session;

	/* don't release wanted unless we've waited a bit. */
	if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
	    time_before(jiffies, ci->i_hold_caps_min)) {
		dout(" delaying issued %s -> %s, wanted %s -> %s on send\n",
		     ceph_cap_string(cap->issued),
		     ceph_cap_string(cap->issued & retain),
		     ceph_cap_string(cap->mds_wanted),
		     ceph_cap_string(want));
		want |= cap->mds_wanted;
		retain |= cap->issued;
		delayed = 1;
	}
	ci->i_ceph_flags &= ~(CEPH_I_NODELAY | CEPH_I_FLUSH);

	cap->issued &= retain;  /* drop bits we don't want */
	if (cap->implemented & ~cap->issued) {
		/*
		 * Wake up any waiters on wanted -> needed transition.
		 * This is due to the weird transition from buffered
		 * to sync IO... we need to flush dirty pages _before_
		 * allowing sync writes to avoid reordering.
		 */
		wake = 1;
	}
	cap->implemented &= cap->issued | used;
	cap->mds_wanted = want;

	follows = flushing ? ci->i_head_snapc->seq : 0;

	keep = cap->implemented;
	seq = cap->seq;
	issue_seq = cap->issue_seq;
	mseq = cap->mseq;
	size = inode->i_size;
	ci->i_reported_size = size;
	max_size = ci->i_wanted_max_size;
	ci->i_requested_max_size = max_size;
	mtime = inode->i_mtime;
	atime = inode->i_atime;
	ctime = inode->i_ctime;
	time_warp_seq = ci->i_time_warp_seq;
	uid = inode->i_uid;
	gid = inode->i_gid;
	mode = inode->i_mode;

	if (flushing & CEPH_CAP_XATTR_EXCL) {
		__ceph_build_xattrs_blob(ci);
		xattr_blob = ci->i_xattrs.blob;
		xattr_version = ci->i_xattrs.version;
	}

	inline_data = ci->i_inline_version != CEPH_INLINE_NONE;

	spin_unlock(&ci->i_ceph_lock);

	ret = send_cap_msg(session, ceph_vino(inode).ino, cap_id,
		op, keep, want, flushing, seq,
		flush_tid, oldest_flush_tid, issue_seq, mseq,
		size, max_size, &mtime, &atime, &ctime, time_warp_seq,
		uid, gid, mode, xattr_version, xattr_blob,
		follows, inline_data);
	if (ret < 0) {
		dout("error sending cap msg, must requeue %p\n", inode);
		delayed = 1;
	}

	if (wake)
		wake_up_all(&ci->i_cap_wq);

	return delayed;
}
static inline int __send_flush_snap(struct inode *inode,
				    struct ceph_mds_session *session,
				    struct ceph_cap_snap *capsnap,
				    u32 mseq, u64 oldest_flush_tid)
{
	return send_cap_msg(session, ceph_vino(inode).ino, 0,
			CEPH_CAP_OP_FLUSHSNAP, capsnap->issued, 0,
			capsnap->dirty, 0, capsnap->cap_flush.tid,
			oldest_flush_tid, 0, mseq, capsnap->size, 0,
			&capsnap->mtime, &capsnap->atime,
			&capsnap->ctime, capsnap->time_warp_seq,
			capsnap->uid, capsnap->gid, capsnap->mode,
			capsnap->xattr_version, capsnap->xattr_blob,
			capsnap->follows, capsnap->inline_data);
}
/*
 * When a snapshot is taken, clients accumulate dirty metadata on
 * inodes with capabilities in ceph_cap_snaps to describe the file
 * state at the time the snapshot was taken.  This must be flushed
 * asynchronously back to the MDS once sync writes complete and dirty
 * data is written out.
 *
 * Called under i_ceph_lock.  Takes s_mutex as needed.
 */
static void __ceph_flush_snaps(struct ceph_inode_info *ci,
			       struct ceph_mds_session *session)
		__releases(ci->i_ceph_lock)
		__acquires(ci->i_ceph_lock)
{
	struct inode *inode = &ci->vfs_inode;
	struct ceph_mds_client *mdsc = session->s_mdsc;
	struct ceph_cap_snap *capsnap;
	u64 oldest_flush_tid = 0;
	u64 first_tid = 1, last_tid = 0;

	dout("__flush_snaps %p session %p\n", inode, session);

	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		/*
		 * we need to wait for sync writes to complete and for dirty
		 * pages to be written out.
		 */
		if (capsnap->dirty_pages || capsnap->writing)
			break;

		/* should be removed by ceph_try_drop_cap_snap() */
		BUG_ON(!capsnap->need_flush);

		/* only flush each capsnap once */
		if (capsnap->cap_flush.tid > 0) {
			dout(" already flushed %p, skipping\n", capsnap);
			continue;
		}

		spin_lock(&mdsc->cap_dirty_lock);
		capsnap->cap_flush.tid = ++mdsc->last_cap_flush_tid;
		list_add_tail(&capsnap->cap_flush.g_list,
			      &mdsc->cap_flush_list);
		if (oldest_flush_tid == 0)
			oldest_flush_tid = __get_oldest_flush_tid(mdsc);
		if (list_empty(&ci->i_flushing_item)) {
			list_add_tail(&ci->i_flushing_item,
				      &session->s_cap_flushing);
		}
		spin_unlock(&mdsc->cap_dirty_lock);

		list_add_tail(&capsnap->cap_flush.i_list,
			      &ci->i_cap_flush_list);

		if (first_tid == 1)
			first_tid = capsnap->cap_flush.tid;
		last_tid = capsnap->cap_flush.tid;
	}

	ci->i_ceph_flags &= ~CEPH_I_FLUSH_SNAPS;

	while (first_tid <= last_tid) {
		struct ceph_cap *cap = ci->i_auth_cap;
		struct ceph_cap_flush *cf;
		int ret;

		if (!(cap && cap->session == session)) {
			dout("__flush_snaps %p auth cap %p not mds%d, "
			     "stop\n", inode, cap, session->s_mds);
			break;
		}

		ret = -ENOENT;
		list_for_each_entry(cf, &ci->i_cap_flush_list, i_list) {
			if (cf->tid >= first_tid) {
				ret = 0;
				break;
			}
		}
		if (ret < 0)
			break;

		first_tid = cf->tid + 1;

		capsnap = container_of(cf, struct ceph_cap_snap, cap_flush);
		atomic_inc(&capsnap->nref);
		spin_unlock(&ci->i_ceph_lock);

		dout("__flush_snaps %p capsnap %p tid %llu %s\n",
		     inode, capsnap, cf->tid, ceph_cap_string(capsnap->dirty));

		ret = __send_flush_snap(inode, session, capsnap, cap->mseq,
					oldest_flush_tid);
		if (ret < 0) {
			pr_err("__flush_snaps: error sending cap flushsnap, "
			       "ino (%llx.%llx) tid %llu follows %llu\n",
			       ceph_vinop(inode), cf->tid, capsnap->follows);
		}

		ceph_put_cap_snap(capsnap);
		spin_lock(&ci->i_ceph_lock);
	}
}
void ceph_flush_snaps(struct ceph_inode_info *ci,
		      struct ceph_mds_session **psession)
{
	struct inode *inode = &ci->vfs_inode;
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	struct ceph_mds_session *session = NULL;
	int mds;

	dout("ceph_flush_snaps %p\n", inode);
	if (psession)
		session = *psession;
retry:
	spin_lock(&ci->i_ceph_lock);
	if (!(ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS)) {
		dout(" no capsnap needs flush, doing nothing\n");
		goto out;
	}
	if (!ci->i_auth_cap) {
		dout(" no auth cap (migrating?), doing nothing\n");
		goto out;
	}

	mds = ci->i_auth_cap->session->s_mds;
	if (session && session->s_mds != mds) {
		dout(" oops, wrong session %p mutex\n", session);
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);
		session = NULL;
	}
	if (!session) {
		spin_unlock(&ci->i_ceph_lock);
		mutex_lock(&mdsc->mutex);
		session = __ceph_lookup_mds_session(mdsc, mds);
		mutex_unlock(&mdsc->mutex);
		if (session) {
			dout(" inverting session/ino locks on %p\n", session);
			mutex_lock(&session->s_mutex);
		}
		goto retry;
	}

	__ceph_flush_snaps(ci, session);
out:
	spin_unlock(&ci->i_ceph_lock);

	if (psession) {
		*psession = session;
	} else if (session) {
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);
	}
	/* we flushed them all; remove this inode from the queue */
	spin_lock(&mdsc->snap_flush_lock);
	list_del_init(&ci->i_snap_flush_item);
	spin_unlock(&mdsc->snap_flush_lock);
}
/*
 * Mark caps dirty.  If inode is newly dirty, return the dirty flags.
 * Caller is then responsible for calling __mark_inode_dirty with the
 * returned flags value.
 */
int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
			   struct ceph_cap_flush **pcf)
{
	struct ceph_mds_client *mdsc =
		ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
	struct inode *inode = &ci->vfs_inode;
	int was = ci->i_dirty_caps;
	int dirty = 0;

	if (!ci->i_auth_cap) {
		pr_warn("__mark_dirty_caps %p %llx mask %s, "
			"but no auth cap (session was closed?)\n",
			inode, ceph_ino(inode), ceph_cap_string(mask));
		return 0;
	}

	dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->vfs_inode,
	     ceph_cap_string(mask), ceph_cap_string(was),
	     ceph_cap_string(was | mask));
	ci->i_dirty_caps |= mask;
	if (was == 0) {
		WARN_ON_ONCE(ci->i_prealloc_cap_flush);
		swap(ci->i_prealloc_cap_flush, *pcf);

		if (!ci->i_head_snapc) {
			WARN_ON_ONCE(!rwsem_is_locked(&mdsc->snap_rwsem));
			ci->i_head_snapc = ceph_get_snap_context(
				ci->i_snap_realm->cached_context);
		}
		dout(" inode %p now dirty snapc %p auth cap %p\n",
		     &ci->vfs_inode, ci->i_head_snapc, ci->i_auth_cap);
		BUG_ON(!list_empty(&ci->i_dirty_item));
		spin_lock(&mdsc->cap_dirty_lock);
		list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
		spin_unlock(&mdsc->cap_dirty_lock);
		if (ci->i_flushing_caps == 0) {
			ihold(inode);
			dirty |= I_DIRTY_SYNC;
		}
	} else {
		WARN_ON_ONCE(!ci->i_prealloc_cap_flush);
	}
	BUG_ON(list_empty(&ci->i_dirty_item));
	if (((was | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) &&
	    (mask & CEPH_CAP_FILE_BUFFER))
		dirty |= I_DIRTY_DATASYNC;
	__cap_delay_requeue(mdsc, ci);
	return dirty;
}
struct ceph_cap_flush *ceph_alloc_cap_flush(void)
{
	return kmem_cache_alloc(ceph_cap_flush_cachep, GFP_KERNEL);
}

void ceph_free_cap_flush(struct ceph_cap_flush *cf)
{
	if (cf)
		kmem_cache_free(ceph_cap_flush_cachep, cf);
}
static u64 __get_oldest_flush_tid(struct ceph_mds_client *mdsc)
{
	if (!list_empty(&mdsc->cap_flush_list)) {
		struct ceph_cap_flush *cf =
			list_first_entry(&mdsc->cap_flush_list,
					 struct ceph_cap_flush, g_list);
		return cf->tid;
	}
	return 0;
}
/*
 * Remove cap_flush from the mdsc's or inode's flushing cap list.
 * Return true if caller needs to wake up flush waiters.
 */
static bool __finish_cap_flush(struct ceph_mds_client *mdsc,
			       struct ceph_inode_info *ci,
			       struct ceph_cap_flush *cf)
{
	struct ceph_cap_flush *prev;
	bool wake = cf->wake;
	if (mdsc) {
		/* are there older pending cap flushes? */
		if (wake && cf->g_list.prev != &mdsc->cap_flush_list) {
			prev = list_prev_entry(cf, g_list);
			prev->wake = true;
			wake = false;
		}
		list_del(&cf->g_list);
	} else if (ci) {
		if (wake && cf->i_list.prev != &ci->i_cap_flush_list) {
			prev = list_prev_entry(cf, i_list);
			prev->wake = true;
			wake = false;
		}
		list_del(&cf->i_list);
	} else {
		BUG_ON(!mdsc && !ci);
	}
	return wake;
}
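/*
 * The wake-flag shuffle above implements "wake in tid order": a
 * waiter cares about its own tid, but waking it is only useful once
 * every older flush has completed too.  So if an older entry is still
 * pending, we hand our wake flag to it instead of waking now, and the
 * wakeup is emitted when the oldest marked entry finally finishes.
 */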
/*
 * Add dirty inode to the flushing list.  Assign a seq number so we
 * can wait for caps to flush without starving.
 *
 * Called under i_ceph_lock.
 */
static int __mark_caps_flushing(struct inode *inode,
				struct ceph_mds_session *session, bool wake,
				u64 *flush_tid, u64 *oldest_flush_tid)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_cap_flush *cf = NULL;
	int flushing;

	BUG_ON(ci->i_dirty_caps == 0);
	BUG_ON(list_empty(&ci->i_dirty_item));
	BUG_ON(!ci->i_prealloc_cap_flush);

	flushing = ci->i_dirty_caps;
	dout("__mark_caps_flushing flushing %s, flushing_caps %s -> %s\n",
	     ceph_cap_string(flushing),
	     ceph_cap_string(ci->i_flushing_caps),
	     ceph_cap_string(ci->i_flushing_caps | flushing));
	ci->i_flushing_caps |= flushing;
	ci->i_dirty_caps = 0;
	dout(" inode %p now !dirty\n", inode);

	swap(cf, ci->i_prealloc_cap_flush);
	cf->caps = flushing;
	cf->wake = wake;

	spin_lock(&mdsc->cap_dirty_lock);
	list_del_init(&ci->i_dirty_item);

	cf->tid = ++mdsc->last_cap_flush_tid;
	list_add_tail(&cf->g_list, &mdsc->cap_flush_list);
	*oldest_flush_tid = __get_oldest_flush_tid(mdsc);

	if (list_empty(&ci->i_flushing_item)) {
		list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing);
		mdsc->num_cap_flushing++;
	}
	spin_unlock(&mdsc->cap_dirty_lock);

	list_add_tail(&cf->i_list, &ci->i_cap_flush_list);

	*flush_tid = cf->tid;
	return flushing;
}
/*
 * try to invalidate mapping pages without blocking.
 */
static int try_nonblocking_invalidate(struct inode *inode)
	__releases(ci->i_ceph_lock)
	__acquires(ci->i_ceph_lock)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u32 invalidating_gen = ci->i_rdcache_gen;

	spin_unlock(&ci->i_ceph_lock);
	ceph_fscache_invalidate(inode);
	invalidate_mapping_pages(&inode->i_data, 0, -1);
	spin_lock(&ci->i_ceph_lock);

	if (inode->i_data.nrpages == 0 &&
	    invalidating_gen == ci->i_rdcache_gen) {
		/* success. */
		dout("try_nonblocking_invalidate %p success\n", inode);
		/* save any racing async invalidate some trouble */
		ci->i_rdcache_revoking = ci->i_rdcache_gen - 1;
		return 0;
	}
	dout("try_nonblocking_invalidate %p failed\n", inode);
	return -1;
}
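/*
 * The i_rdcache_gen comparison above is what makes the nonblocking
 * invalidate safe: i_ceph_lock is dropped around the call to
 * invalidate_mapping_pages(), and if FILE_CACHE was re-issued in that
 * window (bumping i_rdcache_gen in __check_cap_issue()), newly cached
 * pages are legitimate and the invalidation must be reported as
 * failed.
 */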
/*
 * Swiss army knife function to examine currently used and wanted
 * versus held caps.  Release, flush, ack revoked caps to mds as
 * appropriate.
 *
 *  CHECK_CAPS_NODELAY - caller is delayed work and we should not delay
 *    cap release further.
 *  CHECK_CAPS_AUTHONLY - we should only check the auth cap
 *  CHECK_CAPS_FLUSH - we should flush any dirty caps immediately, without
 *    further delay.
 */
void ceph_check_caps(struct ceph_inode_info *ci, int flags,
		     struct ceph_mds_session *session)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct inode *inode = &ci->vfs_inode;
	struct ceph_cap *cap;
	u64 flush_tid, oldest_flush_tid;
	int file_wanted, used, cap_used;
	int took_snap_rwsem = 0;             /* true if mdsc->snap_rwsem held */
	int issued, implemented, want, retain, revoking, flushing = 0;
	int mds = -1;   /* keep track of how far we've gone through i_caps list
			   to avoid an infinite loop on retry */
	struct rb_node *p;
	int delayed = 0, sent = 0, num;
	bool is_delayed = flags & CHECK_CAPS_NODELAY;
	bool queue_invalidate = false;
	bool force_requeue = false;
	bool tried_invalidate = false;

	/* if we are unmounting, flush any unused caps immediately. */
	if (mdsc->stopping)
		is_delayed = true;

	spin_lock(&ci->i_ceph_lock);

	if (ci->i_ceph_flags & CEPH_I_FLUSH)
		flags |= CHECK_CAPS_FLUSH;

	goto retry_locked;
retry:
	spin_lock(&ci->i_ceph_lock);
retry_locked:
	file_wanted = __ceph_caps_file_wanted(ci);
	used = __ceph_caps_used(ci);
	issued = __ceph_caps_issued(ci, &implemented);
	revoking = implemented & ~issued;

	want = file_wanted;
	retain = file_wanted | used | CEPH_CAP_PIN;
	if (!mdsc->stopping && inode->i_nlink > 0) {
		if (file_wanted) {
			retain |= CEPH_CAP_ANY;       /* be greedy */
		} else if (S_ISDIR(inode->i_mode) &&
			   (issued & CEPH_CAP_FILE_SHARED) &&
			   __ceph_dir_is_complete(ci)) {
			/*
			 * If a directory is complete, we want to keep
			 * the exclusive cap. So that MDS does not end up
			 * revoking the shared cap on every create/unlink
			 * operation.
			 */
			want = CEPH_CAP_ANY_SHARED | CEPH_CAP_FILE_EXCL;
			retain |= want;
		} else {
			retain |= CEPH_CAP_ANY_SHARED;
			/*
			 * keep RD only if we didn't have the file open RW,
			 * because then the mds would revoke it anyway to
			 * journal max_size=0.
			 */
			if (ci->i_max_size == 0)
				retain |= CEPH_CAP_ANY_RD;
		}
	}

	dout("check_caps %p file_want %s used %s dirty %s flushing %s"
	     " issued %s revoking %s retain %s %s%s%s\n", inode,
	     ceph_cap_string(file_wanted),
	     ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps),
	     ceph_cap_string(ci->i_flushing_caps),
	     ceph_cap_string(issued), ceph_cap_string(revoking),
	     ceph_cap_string(retain),
	     (flags & CHECK_CAPS_AUTHONLY) ? " AUTHONLY" : "",
	     (flags & CHECK_CAPS_NODELAY) ? " NODELAY" : "",
	     (flags & CHECK_CAPS_FLUSH) ? " FLUSH" : "");

	/*
	 * If we no longer need to hold on to our old caps, and we may
	 * have cached pages, but don't want them, then try to invalidate.
	 * If we fail, it's because pages are locked.... try again later.
	 */
	if ((!is_delayed || mdsc->stopping) &&
	    !S_ISDIR(inode->i_mode) &&		/* ignore readdir cache */
	    !(ci->i_wb_ref || ci->i_wrbuffer_ref) &&   /* no dirty pages... */
	    inode->i_data.nrpages &&		/* have cached pages */
	    (revoking & (CEPH_CAP_FILE_CACHE|
			 CEPH_CAP_FILE_LAZYIO)) && /*  or revoking cache */
	    !tried_invalidate) {
		dout("check_caps trying to invalidate on %p\n", inode);
		if (try_nonblocking_invalidate(inode) < 0) {
			if (revoking & (CEPH_CAP_FILE_CACHE|
					CEPH_CAP_FILE_LAZYIO)) {
				dout("check_caps queuing invalidate\n");
				queue_invalidate = true;
				ci->i_rdcache_revoking = ci->i_rdcache_gen;
			} else {
				dout("check_caps failed to invalidate pages\n");
				/* we failed to invalidate pages.  check these
				   caps again later. */
				force_requeue = true;
				__cap_set_timeouts(mdsc, ci);
			}
		}
		tried_invalidate = true;
		goto retry_locked;
	}

	num = 0;
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		num++;

		/* avoid looping forever */
		if (mds >= cap->mds ||
		    ((flags & CHECK_CAPS_AUTHONLY) && cap != ci->i_auth_cap))
			continue;

		/* NOTE: no side-effects allowed, until we take s_mutex */

		cap_used = used;
		if (ci->i_auth_cap && cap != ci->i_auth_cap)
			cap_used &= ~ci->i_auth_cap->issued;

		revoking = cap->implemented & ~cap->issued;
		dout(" mds%d cap %p used %s issued %s implemented %s revoking %s\n",
		     cap->mds, cap, ceph_cap_string(cap_used),
		     ceph_cap_string(cap->issued),
		     ceph_cap_string(cap->implemented),
		     ceph_cap_string(revoking));

		if (cap == ci->i_auth_cap &&
		    (cap->issued & CEPH_CAP_FILE_WR)) {
			/* request larger max_size from MDS? */
			if (ci->i_wanted_max_size > ci->i_max_size &&
			    ci->i_wanted_max_size > ci->i_requested_max_size) {
				dout("requesting new max_size\n");
				goto ack;
			}

			/* approaching file_max? */
			if ((inode->i_size << 1) >= ci->i_max_size &&
			    (ci->i_reported_size << 1) < ci->i_max_size) {
				dout("i_size approaching max_size\n");
				goto ack;
			}
		}
		/* flush anything dirty? */
		if (cap == ci->i_auth_cap) {
			if ((flags & CHECK_CAPS_FLUSH) && ci->i_dirty_caps) {
				dout("flushing dirty caps\n");
				goto ack;
			}
			if (ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS) {
				dout("flushing snap caps\n");
				goto ack;
			}
		}

		/* completed revocation? going down and there are no caps? */
		if (revoking && (revoking & cap_used) == 0) {
			dout("completed revocation of %s\n",
			     ceph_cap_string(cap->implemented & ~cap->issued));
			goto ack;
		}

		/* want more caps from mds? */
		if (want & ~cap->mds_wanted) {
			if (want & ~(cap->mds_wanted | cap->issued))
				goto ack;
			if (!__cap_is_valid(cap))
				goto ack;
		}

		/* things we might delay */
		if ((cap->issued & ~retain) == 0 &&
		    cap->mds_wanted == want)
			continue;     /* nope, all good */

		if (is_delayed)
			goto ack;

		/* delay? */
		if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
		    time_before(jiffies, ci->i_hold_caps_max)) {
			dout(" delaying issued %s -> %s, wanted %s -> %s\n",
			     ceph_cap_string(cap->issued),
			     ceph_cap_string(cap->issued & retain),
			     ceph_cap_string(cap->mds_wanted),
			     ceph_cap_string(want));
			delayed++;
			continue;
		}

ack:
		if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
			dout(" skipping %p I_NOFLUSH set\n", inode);
			continue;
		}

		if (session && session != cap->session) {
			dout("oops, wrong session %p mutex\n", session);
			mutex_unlock(&session->s_mutex);
			session = NULL;
		}
		if (!session) {
			session = cap->session;
			if (mutex_trylock(&session->s_mutex) == 0) {
				dout("inverting session/ino locks on %p\n",
				     session);
				session = ceph_get_mds_session(session);
				spin_unlock(&ci->i_ceph_lock);
				if (took_snap_rwsem) {
					up_read(&mdsc->snap_rwsem);
					took_snap_rwsem = 0;
				}
				if (session) {
					mutex_lock(&session->s_mutex);
					ceph_put_mds_session(session);
				} else {
					/*
					 * Because we take the reference while
					 * holding the i_ceph_lock, it should
					 * never be NULL. Throw a warning if it
					 * ever is.
					 */
					WARN_ON_ONCE(true);
				}
				goto retry;
			}
		}

		/* kick flushing and flush snaps before sending normal
		 * cap message */
		if (cap == ci->i_auth_cap &&
		    (ci->i_ceph_flags &
		     (CEPH_I_KICK_FLUSH | CEPH_I_FLUSH_SNAPS))) {
			if (ci->i_ceph_flags & CEPH_I_KICK_FLUSH) {
				spin_lock(&mdsc->cap_dirty_lock);
				oldest_flush_tid = __get_oldest_flush_tid(mdsc);
				spin_unlock(&mdsc->cap_dirty_lock);
				__kick_flushing_caps(mdsc, session, ci,
						     oldest_flush_tid);
				ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH;
			}
			if (ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS)
				__ceph_flush_snaps(ci, session);

			goto retry_locked;
		}

		/* take snap_rwsem after session mutex */
		if (!took_snap_rwsem) {
			if (down_read_trylock(&mdsc->snap_rwsem) == 0) {
				dout("inverting snap/in locks on %p\n",
				     inode);
				spin_unlock(&ci->i_ceph_lock);
				down_read(&mdsc->snap_rwsem);
				took_snap_rwsem = 1;
				goto retry;
			}
			took_snap_rwsem = 1;
		}

		if (cap == ci->i_auth_cap && ci->i_dirty_caps) {
			flushing = __mark_caps_flushing(inode, session, false,
							&flush_tid,
							&oldest_flush_tid);
		} else {
			flushing = 0;
			flush_tid = 0;
			spin_lock(&mdsc->cap_dirty_lock);
			oldest_flush_tid = __get_oldest_flush_tid(mdsc);
			spin_unlock(&mdsc->cap_dirty_lock);
		}

		mds = cap->mds;  /* remember mds, so we don't repeat */
		sent++;

		/* __send_cap drops i_ceph_lock */
		delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, cap_used,
				      want, retain, flushing,
				      flush_tid, oldest_flush_tid);
		goto retry; /* retake i_ceph_lock and restart our cap scan. */
	}

	/*
	 * Reschedule delayed caps release if we delayed anything,
	 * otherwise cancel.
	 */
	if (delayed && is_delayed)
		force_requeue = true;   /* __send_cap delayed release; requeue */
	if (!delayed && !is_delayed)
		__cap_delay_cancel(mdsc, ci);
	else if (!is_delayed || force_requeue)
		__cap_delay_requeue(mdsc, ci);

	spin_unlock(&ci->i_ceph_lock);

	if (queue_invalidate)
		ceph_queue_invalidate(inode);

	if (session)
		mutex_unlock(&session->s_mutex);
	if (took_snap_rwsem)
		up_read(&mdsc->snap_rwsem);
}
/*
 * Try to flush dirty caps back to the auth mds.
 */
static int try_flush_caps(struct inode *inode, u64 *ptid)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_session *session = NULL;
	int flushing = 0;
	u64 flush_tid = 0, oldest_flush_tid = 0;

retry:
	spin_lock(&ci->i_ceph_lock);
	if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
		spin_unlock(&ci->i_ceph_lock);
		dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
		goto out;
	}
	if (ci->i_dirty_caps && ci->i_auth_cap) {
		struct ceph_cap *cap = ci->i_auth_cap;
		int used = __ceph_caps_used(ci);
		int want = __ceph_caps_wanted(ci);
		int delayed;

		if (!session || session != cap->session) {
			spin_unlock(&ci->i_ceph_lock);
			if (session)
				mutex_unlock(&session->s_mutex);
			session = cap->session;
			mutex_lock(&session->s_mutex);
			goto retry;
		}
		if (cap->session->s_state < CEPH_MDS_SESSION_OPEN) {
			spin_unlock(&ci->i_ceph_lock);
			goto out;
		}

		flushing = __mark_caps_flushing(inode, session, true,
						&flush_tid, &oldest_flush_tid);

		/* __send_cap drops i_ceph_lock */
		delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, used, want,
				     (cap->issued | cap->implemented),
				     flushing, flush_tid, oldest_flush_tid);

		if (delayed) {
			spin_lock(&ci->i_ceph_lock);
			__cap_delay_requeue(mdsc, ci);
			spin_unlock(&ci->i_ceph_lock);
		}
	} else {
		if (!list_empty(&ci->i_cap_flush_list)) {
			struct ceph_cap_flush *cf =
				list_last_entry(&ci->i_cap_flush_list,
						struct ceph_cap_flush, i_list);
			cf->wake = true;
			flush_tid = cf->tid;
		}
		flushing = ci->i_flushing_caps;
		spin_unlock(&ci->i_ceph_lock);
	}
out:
	if (session)
		mutex_unlock(&session->s_mutex);

	*ptid = flush_tid;
	return flushing;
}
/*
 * Return true if we've flushed caps through the given flush_tid.
 */
static int caps_are_flushed(struct inode *inode, u64 flush_tid)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret = 1;

	spin_lock(&ci->i_ceph_lock);
	if (!list_empty(&ci->i_cap_flush_list)) {
		struct ceph_cap_flush *cf =
			list_first_entry(&ci->i_cap_flush_list,
					 struct ceph_cap_flush, i_list);
		if (cf->tid <= flush_tid)
			ret = 0;
	}
	spin_unlock(&ci->i_ceph_lock);
	return ret;
}
/*
 * wait for any unsafe requests to complete.
 */
static int unsafe_request_wait(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_request *req1 = NULL, *req2 = NULL;
	int ret, err = 0;

	spin_lock(&ci->i_unsafe_lock);
	if (S_ISDIR(inode->i_mode) && !list_empty(&ci->i_unsafe_dirops)) {
		req1 = list_last_entry(&ci->i_unsafe_dirops,
					struct ceph_mds_request,
					r_unsafe_dir_item);
		ceph_mdsc_get_request(req1);
	}
	if (!list_empty(&ci->i_unsafe_iops)) {
		req2 = list_last_entry(&ci->i_unsafe_iops,
					struct ceph_mds_request,
					r_unsafe_target_item);
		ceph_mdsc_get_request(req2);
	}
	spin_unlock(&ci->i_unsafe_lock);

	dout("unsafe_request_wait %p wait on tid %llu %llu\n",
	     inode, req1 ? req1->r_tid : 0ULL, req2 ? req2->r_tid : 0ULL);
	if (req1) {
		ret = !wait_for_completion_timeout(&req1->r_safe_completion,
					ceph_timeout_jiffies(req1->r_timeout));
		if (ret)
			err = -EIO;
		ceph_mdsc_put_request(req1);
	}
	if (req2) {
		ret = !wait_for_completion_timeout(&req2->r_safe_completion,
					ceph_timeout_jiffies(req2->r_timeout));
		if (ret)
			err = -EIO;
		ceph_mdsc_put_request(req2);
	}
	return err;
}
int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 flush_tid;
	int ret;
	int dirty;

	dout("fsync %p%s\n", inode, datasync ? " datasync" : "");
	ceph_sync_write_wait(inode);

	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret < 0)
		goto out;
	if (datasync)
		goto out;

	inode_lock(inode);
	dirty = try_flush_caps(inode, &flush_tid);
	dout("fsync dirty caps are %s\n", ceph_cap_string(dirty));

	ret = unsafe_request_wait(inode);
	/*
	 * only wait on non-file metadata writeback (the mds
	 * can recover size and mtime, so we don't need to
	 * wait for that)
	 */
	if (!ret && (dirty & ~CEPH_CAP_ANY_FILE_WR)) {
		ret = wait_event_interruptible(ci->i_cap_wq,
					caps_are_flushed(inode, flush_tid));
	}
	inode_unlock(inode);
out:
	dout("fsync %p%s result=%d\n", inode, datasync ? " datasync" : "", ret);
	return ret;
}
/*
 * Flush any dirty caps back to the mds.  If we aren't asked to wait,
 * queue inode for flush but don't do so immediately, because we can
 * get by with fewer MDS messages if we wait for data writeback to
 * complete first.
 */
int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 flush_tid;
	int err = 0;
	int dirty;
	int wait = wbc->sync_mode == WB_SYNC_ALL;

	dout("write_inode %p wait=%d\n", inode, wait);
	if (wait) {
		dirty = try_flush_caps(inode, &flush_tid);
		if (dirty)
			err = wait_event_interruptible(ci->i_cap_wq,
				       caps_are_flushed(inode, flush_tid));
	} else {
		struct ceph_mds_client *mdsc =
			ceph_sb_to_client(inode->i_sb)->mdsc;

		spin_lock(&ci->i_ceph_lock);
		if (__ceph_caps_dirty(ci))
			__cap_delay_requeue_front(mdsc, ci);
		spin_unlock(&ci->i_ceph_lock);
	}
	return err;
}
static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *session,
				 struct ceph_inode_info *ci,
				 u64 oldest_flush_tid)
	__releases(ci->i_ceph_lock)
	__acquires(ci->i_ceph_lock)
{
	struct inode *inode = &ci->vfs_inode;
	struct ceph_cap *cap;
	struct ceph_cap_flush *cf;
	int ret;
	u64 first_tid = 0;

	list_for_each_entry(cf, &ci->i_cap_flush_list, i_list) {
		if (cf->tid < first_tid)
			continue;

		cap = ci->i_auth_cap;
		if (!(cap && cap->session == session)) {
			pr_err("%p auth cap %p not mds%d ???\n",
			       inode, cap, session->s_mds);
			break;
		}

		first_tid = cf->tid + 1;

		if (cf->caps) {
			dout("kick_flushing_caps %p cap %p tid %llu %s\n",
			     inode, cap, cf->tid, ceph_cap_string(cf->caps));
			ci->i_ceph_flags |= CEPH_I_NODELAY;
			ret = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
					  __ceph_caps_used(ci),
					  __ceph_caps_wanted(ci),
					  cap->issued | cap->implemented,
					  cf->caps, cf->tid, oldest_flush_tid);
			if (ret) {
				pr_err("kick_flushing_caps: error sending "
					"cap flush, ino (%llx.%llx) "
					"tid %llu flushing %s\n",
					ceph_vinop(inode), cf->tid,
					ceph_cap_string(cf->caps));
			}
		} else {
			struct ceph_cap_snap *capsnap =
					container_of(cf, struct ceph_cap_snap,
						    cap_flush);
			dout("kick_flushing_caps %p capsnap %p tid %llu %s\n",
			     inode, capsnap, cf->tid,
			     ceph_cap_string(capsnap->dirty));

			atomic_inc(&capsnap->nref);
			spin_unlock(&ci->i_ceph_lock);

			ret = __send_flush_snap(inode, session, capsnap, cap->mseq,
						oldest_flush_tid);
			if (ret < 0) {
				pr_err("kick_flushing_caps: error sending "
					"cap flushsnap, ino (%llx.%llx) "
					"tid %llu follows %llu\n",
					ceph_vinop(inode), cf->tid,
					capsnap->follows);
			}

			ceph_put_cap_snap(capsnap);
		}

		spin_lock(&ci->i_ceph_lock);
	}
}
void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
				   struct ceph_mds_session *session)
{
	struct ceph_inode_info *ci;
	struct ceph_cap *cap;
	u64 oldest_flush_tid;

	dout("early_kick_flushing_caps mds%d\n", session->s_mds);

	spin_lock(&mdsc->cap_dirty_lock);
	oldest_flush_tid = __get_oldest_flush_tid(mdsc);
	spin_unlock(&mdsc->cap_dirty_lock);

	list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
		spin_lock(&ci->i_ceph_lock);
		cap = ci->i_auth_cap;
		if (!(cap && cap->session == session)) {
			pr_err("%p auth cap %p not mds%d ???\n",
				&ci->vfs_inode, cap, session->s_mds);
			spin_unlock(&ci->i_ceph_lock);
			continue;
		}

		/*
		 * if flushing caps were revoked, we re-send the cap flush
		 * in client reconnect stage. This guarantees the MDS
		 * processes the cap flush message before issuing the
		 * flushing caps to another client.
		 */
		if ((cap->issued & ci->i_flushing_caps) !=
		    ci->i_flushing_caps) {
			ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH;
			__kick_flushing_caps(mdsc, session, ci,
					     oldest_flush_tid);
		} else {
			ci->i_ceph_flags |= CEPH_I_KICK_FLUSH;
		}

		spin_unlock(&ci->i_ceph_lock);
	}
}
void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
			     struct ceph_mds_session *session)
{
	struct ceph_inode_info *ci;
	struct ceph_cap *cap;
	u64 oldest_flush_tid;

	dout("kick_flushing_caps mds%d\n", session->s_mds);

	spin_lock(&mdsc->cap_dirty_lock);
	oldest_flush_tid = __get_oldest_flush_tid(mdsc);
	spin_unlock(&mdsc->cap_dirty_lock);

	list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
		spin_lock(&ci->i_ceph_lock);
		cap = ci->i_auth_cap;
		if (!(cap && cap->session == session)) {
			pr_err("%p auth cap %p not mds%d ???\n",
				&ci->vfs_inode, cap, session->s_mds);
			spin_unlock(&ci->i_ceph_lock);
			continue;
		}
		if (ci->i_ceph_flags & CEPH_I_KICK_FLUSH) {
			ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH;
			__kick_flushing_caps(mdsc, session, ci,
					     oldest_flush_tid);
		}
		spin_unlock(&ci->i_ceph_lock);
	}
}
2267 static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
2268 struct ceph_mds_session *session,
2269 struct inode *inode)
2270 __releases(ci->i_ceph_lock)
2272 struct ceph_inode_info *ci = ceph_inode(inode);
2273 struct ceph_cap *cap;
2275 cap = ci->i_auth_cap;
2276 dout("kick_flushing_inode_caps %p flushing %s\n", inode,
2277 ceph_cap_string(ci->i_flushing_caps));
2279 if (!list_empty(&ci->i_cap_flush_list)) {
2280 u64 oldest_flush_tid;
2281 spin_lock(&mdsc->cap_dirty_lock);
2282 list_move_tail(&ci->i_flushing_item,
2283 &cap->session->s_cap_flushing);
2284 oldest_flush_tid = __get_oldest_flush_tid(mdsc);
2285 spin_unlock(&mdsc->cap_dirty_lock);
2287 ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH;
2288 __kick_flushing_caps(mdsc, session, ci, oldest_flush_tid);
2289 spin_unlock(&ci->i_ceph_lock);
2291 spin_unlock(&ci->i_ceph_lock);
2297 * Take references to capabilities we hold, so that we don't release
2298 * them to the MDS prematurely.
2300 * Protected by i_ceph_lock.
2302 static void __take_cap_refs(struct ceph_inode_info *ci, int got,
2303 bool snap_rwsem_locked)
2305 if (got & CEPH_CAP_PIN)
2307 if (got & CEPH_CAP_FILE_RD)
2309 if (got & CEPH_CAP_FILE_CACHE)
2310 ci->i_rdcache_ref++;
2311 if (got & CEPH_CAP_FILE_WR) {
2312 if (ci->i_wr_ref == 0 && !ci->i_head_snapc) {
2313 BUG_ON(!snap_rwsem_locked);
2314 ci->i_head_snapc = ceph_get_snap_context(
2315 ci->i_snap_realm->cached_context);
2319 if (got & CEPH_CAP_FILE_BUFFER) {
2320 if (ci->i_wb_ref == 0)
2321 ihold(&ci->vfs_inode);
2323 dout("__take_cap_refs %p wb %d -> %d (?)\n",
2324 &ci->vfs_inode, ci->i_wb_ref-1, ci->i_wb_ref);
2329 * Try to grab cap references. Specify those refs we @want, and the
2330 * minimal set we @need. Also include the larger offset we are writing
2331 * to (when applicable), and check against max_size here as well.
2332 * Note that caller is responsible for ensuring max_size increases are
2333 * requested from the MDS.
2335 static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
2336 loff_t endoff, bool nonblock, int *got, int *err)
2338 struct inode *inode = &ci->vfs_inode;
2339 struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
2341 int have, implemented;
2343 bool snap_rwsem_locked = false;
2345 dout("get_cap_refs %p need %s want %s\n", inode,
2346 ceph_cap_string(need), ceph_cap_string(want));
2349 spin_lock(&ci->i_ceph_lock);
2351 /* make sure file is actually open */
2352 file_wanted = __ceph_caps_file_wanted(ci);
2353 if ((file_wanted & need) != need) {
2354 dout("try_get_cap_refs need %s file_wanted %s, EBADF\n",
2355 ceph_cap_string(need), ceph_cap_string(file_wanted));
2361 /* finish pending truncate */
2362 while (ci->i_truncate_pending) {
2363 spin_unlock(&ci->i_ceph_lock);
2364 if (snap_rwsem_locked) {
2365 up_read(&mdsc->snap_rwsem);
2366 snap_rwsem_locked = false;
2368 __ceph_do_pending_vmtruncate(inode);
2369 spin_lock(&ci->i_ceph_lock);
2372 have = __ceph_caps_issued(ci, &implemented);
2374 if (have & need & CEPH_CAP_FILE_WR) {
2375 if (endoff >= 0 && endoff > (loff_t)ci->i_max_size) {
2376 dout("get_cap_refs %p endoff %llu > maxsize %llu\n",
2377 inode, endoff, ci->i_max_size);
2378 if (endoff > ci->i_requested_max_size) {
2385 * If a sync write is in progress, we must wait, so that we
2386 * can get a final snapshot value for size+mtime.
2388 if (__ceph_have_pending_cap_snap(ci)) {
2389 dout("get_cap_refs %p cap_snap_pending\n", inode);
2394 if ((have & need) == need) {
2396 * Look at (implemented & ~have & not) so that we keep waiting
2397 * on transition from wanted -> needed caps. This is needed
2398 * for WRBUFFER|WR -> WR to keep a new WR sync write from
2399 * starting before a prior buffered writeback happens.
2401 int not = want & ~(have & need);
2402 int revoking = implemented & ~have;
2403 dout("get_cap_refs %p have %s but not %s (revoking %s)\n",
2404 inode, ceph_cap_string(have), ceph_cap_string(not),
2405 ceph_cap_string(revoking));
2406 if ((revoking & not) == 0) {
2407 if (!snap_rwsem_locked &&
2408 !ci->i_head_snapc &&
2409 (need & CEPH_CAP_FILE_WR)) {
2410 if (!down_read_trylock(&mdsc->snap_rwsem)) {
2412 * we cannot call down_read() when the
2413 * task isn't in TASK_RUNNING state
2421 spin_unlock(&ci->i_ceph_lock);
2422 down_read(&mdsc->snap_rwsem);
2423 snap_rwsem_locked = true;
2426 snap_rwsem_locked = true;
2428 *got = need | (have & want);
2429 if ((need & CEPH_CAP_FILE_RD) &&
2430 !(*got & CEPH_CAP_FILE_CACHE))
2431 ceph_disable_fscache_readpage(ci);
2432 __take_cap_refs(ci, *got, true);
2436 int session_readonly = false;
2437 if ((need & CEPH_CAP_FILE_WR) && ci->i_auth_cap) {
2438 struct ceph_mds_session *s = ci->i_auth_cap->session;
2439 spin_lock(&s->s_cap_lock);
2440 session_readonly = s->s_readonly;
2441 spin_unlock(&s->s_cap_lock);
2443 if (session_readonly) {
2444 dout("get_cap_refs %p needed %s but mds%d readonly\n",
2445 inode, ceph_cap_string(need), ci->i_auth_cap->mds);
2451 if (ci->i_ceph_flags & CEPH_I_CAP_DROPPED) {
2453 if (ACCESS_ONCE(mdsc->fsc->mount_state) ==
2454 CEPH_MOUNT_SHUTDOWN) {
2455 dout("get_cap_refs %p forced umount\n", inode);
2460 mds_wanted = __ceph_caps_mds_wanted(ci);
2461 if ((mds_wanted & need) != need) {
2462 dout("get_cap_refs %p caps were dropped"
2463 " (session killed?)\n", inode);
2468 if ((mds_wanted & file_wanted) ==
2469 (file_wanted & (CEPH_CAP_FILE_RD|CEPH_CAP_FILE_WR)))
2470 ci->i_ceph_flags &= ~CEPH_I_CAP_DROPPED;
2473 dout("get_cap_refs %p have %s needed %s\n", inode,
2474 ceph_cap_string(have), ceph_cap_string(need));
2477 spin_unlock(&ci->i_ceph_lock);
2478 if (snap_rwsem_locked)
2479 up_read(&mdsc->snap_rwsem);
2481 dout("get_cap_refs %p ret %d got %s\n", inode,
2482 ret, ceph_cap_string(*got));
2487 * Check the offset we are writing up to against our current
2488 * max_size. If necessary, tell the MDS we want to write to a larger size.
2491 static void check_max_size(struct inode *inode, loff_t endoff)
2493 struct ceph_inode_info *ci = ceph_inode(inode);
2496 /* do we need to explicitly request a larger max_size? */
2497 spin_lock(&ci->i_ceph_lock);
2498 if (endoff >= ci->i_max_size && endoff > ci->i_wanted_max_size) {
2499 dout("write %p at large endoff %llu, req max_size\n",
2501 ci->i_wanted_max_size = endoff;
2503 /* duplicate ceph_check_caps()'s logic */
2504 if (ci->i_auth_cap &&
2505 (ci->i_auth_cap->issued & CEPH_CAP_FILE_WR) &&
2506 ci->i_wanted_max_size > ci->i_max_size &&
2507 ci->i_wanted_max_size > ci->i_requested_max_size)
2509 spin_unlock(&ci->i_ceph_lock);
2511 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
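/*
 * Worked example (illustrative, not from the original source): with
 * i_max_size = 4 MB, a buffered write ending at endoff = 6 MB takes the
 * branch above: i_wanted_max_size becomes 6 MB and, if we hold the auth
 * cap with FILE_WR issued, ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL)
 * asks the auth MDS for a larger max_size. Until the grant arrives,
 * writers block in ceph_get_caps() below.
 */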
2514 int ceph_try_get_caps(struct ceph_inode_info *ci, int need, int want, int *got)
2518 BUG_ON(need & ~CEPH_CAP_FILE_RD);
2519 BUG_ON(want & ~(CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO));
2520 ret = ceph_pool_perm_check(ci, need);
2524 ret = try_get_cap_refs(ci, need, want, 0, true, got, &err);
2526 if (err == -EAGAIN) {
2528 } else if (err < 0) {
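/*
 * Illustrative sketch (not part of the original file): a non-blocking
 * grab of read caps, e.g. before starting a cached read. The function
 * name is hypothetical; the cap masks respect the BUG_ON() constraints
 * in ceph_try_get_caps() above.
 */
static bool __maybe_unused example_try_start_cached_read(struct ceph_inode_info *ci)
{
	int got = 0;

	/* never sleeps: ceph_try_get_caps() passes nonblock=true */
	if (ceph_try_get_caps(ci, CEPH_CAP_FILE_RD,
			      CEPH_CAP_FILE_CACHE, &got) <= 0)
		return false;	/* caps unavailable or an error occurred */

	dout("example got %s on %p\n",
	     ceph_cap_string(got), &ci->vfs_inode);
	/* ... perform the cached read ... */
	ceph_put_cap_refs(ci, got);	/* must pair with the successful grab */
	return true;
}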
2536 * Wait for caps, and take cap references. If we can't get a WR cap
2537 * due to a small max_size, make sure we check_max_size (and possibly
2538 * ask the mds) so we don't get hung up indefinitely.
2540 int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
2541 loff_t endoff, int *got, struct page **pinned_page)
2543 int _got, ret, err = 0;
2545 ret = ceph_pool_perm_check(ci, need);
2551 check_max_size(&ci->vfs_inode, endoff);
2555 ret = try_get_cap_refs(ci, need, want, endoff,
2556 false, &_got, &err);
2563 DEFINE_WAIT_FUNC(wait, woken_wake_function);
2564 add_wait_queue(&ci->i_cap_wq, &wait);
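/*
 * Open-coded "woken" wait: the task is on i_cap_wq before re-checking,
 * so a wakeup between try_get_cap_refs() and wait_woken() is not lost.
 * nonblock=true keeps try_get_cap_refs() from sleeping on snap_rwsem
 * while we are in this loop.
 */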
2566 while (!try_get_cap_refs(ci, need, want, endoff,
2567 true, &_got, &err)) {
2568 if (signal_pending(current)) {
2572 wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
2575 remove_wait_queue(&ci->i_cap_wq, &wait);
2583 if (err == -ESTALE) {
2584 /* session was killed, try to renew caps */
2585 ret = ceph_renew_caps(&ci->vfs_inode);
2592 if (ci->i_inline_version != CEPH_INLINE_NONE &&
2593 (_got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
2594 i_size_read(&ci->vfs_inode) > 0) {
2596 find_get_page(ci->vfs_inode.i_mapping, 0);
2598 if (PageUptodate(page)) {
2599 *pinned_page = page;
2605 * drop cap refs first because getattr while
2606 * holding cap refs can cause a deadlock.
2608 ceph_put_cap_refs(ci, _got);
2612 * getattr request will bring inline data into the page cache.
2615 ret = __ceph_do_getattr(&ci->vfs_inode, NULL,
2616 CEPH_STAT_CAP_INLINE_DATA,
2625 if ((_got & CEPH_CAP_FILE_RD) && (_got & CEPH_CAP_FILE_CACHE))
2626 ceph_fscache_revalidate_cookie(ci);
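/*
 * Illustrative sketch (not part of the original file): the typical
 * blocking read-side pattern around ceph_get_caps(). The function name
 * and the elided read step are hypothetical; the cap masks, the
 * endoff = -1 convention for reads, and the pinned_page handling mirror
 * the helper above.
 */
static ssize_t __maybe_unused example_read_with_caps(struct ceph_inode_info *ci)
{
	struct page *pinned_page = NULL;
	int got = 0;
	ssize_t ret;

	/* need FILE_RD; FILE_CACHE/LAZYIO are merely nice to have */
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD,
			    CEPH_CAP_FILE_CACHE | CEPH_CAP_FILE_LAZYIO,
			    -1, &got, &pinned_page);
	if (ret < 0)
		return ret;

	/* ... read via the page cache or the OSDs, depending on @got ... */
	ret = 0;

	if (pinned_page)
		put_page(pinned_page);	/* ref taken above for inline data */
	ceph_put_cap_refs(ci, got);
	return ret;
}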
2633 * Take cap refs. Caller must already know we hold at least one ref
2634 * on the caps in question or we don't know this is safe.
2636 void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps)
2638 spin_lock(&ci->i_ceph_lock);
2639 __take_cap_refs(ci, caps, false);
2640 spin_unlock(&ci->i_ceph_lock);
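/*
 * Example (illustrative): a path that already holds a FILE_BUFFER ref
 * from ceph_get_caps() may duplicate it for an async context with
 * ceph_get_cap_refs(ci, CEPH_CAP_FILE_BUFFER); the completion side then
 * drops it with ceph_put_cap_refs(ci, CEPH_CAP_FILE_BUFFER).
 */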
2645 * drop a cap_snap that is not associated with any snapshot.
2646 * we don't need to send a FLUSHSNAP message for it.
2648 static int ceph_try_drop_cap_snap(struct ceph_inode_info *ci,
2649 struct ceph_cap_snap *capsnap)
2651 if (!capsnap->need_flush &&
2652 !capsnap->writing && !capsnap->dirty_pages) {
2653 dout("dropping cap_snap %p follows %llu\n",
2654 capsnap, capsnap->follows);
2655 BUG_ON(capsnap->cap_flush.tid > 0);
2656 ceph_put_snap_context(capsnap->context);
2657 if (!list_is_last(&capsnap->ci_item, &ci->i_cap_snaps))
2658 ci->i_ceph_flags |= CEPH_I_FLUSH_SNAPS;
2660 list_del(&capsnap->ci_item);
2661 ceph_put_cap_snap(capsnap);
2670 * If we released the last ref on any given cap, call ceph_check_caps
2671 * to release (or schedule a release).
2673 * If we are releasing a WR cap (from a sync write), finalize any affected
2674 * cap_snap, and wake up any waiters.
2676 void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
2678 struct inode *inode = &ci->vfs_inode;
2679 int last = 0, put = 0, flushsnaps = 0, wake = 0;
2681 spin_lock(&ci->i_ceph_lock);
2682 if (had & CEPH_CAP_PIN)
2684 if (had & CEPH_CAP_FILE_RD)
2685 if (--ci->i_rd_ref == 0)
2687 if (had & CEPH_CAP_FILE_CACHE)
2688 if (--ci->i_rdcache_ref == 0)
2690 if (had & CEPH_CAP_FILE_BUFFER) {
2691 if (--ci->i_wb_ref == 0) {
2695 dout("put_cap_refs %p wb %d -> %d (?)\n",
2696 inode, ci->i_wb_ref+1, ci->i_wb_ref);
2698 if (had & CEPH_CAP_FILE_WR)
2699 if (--ci->i_wr_ref == 0) {
2701 if (__ceph_have_pending_cap_snap(ci)) {
2702 struct ceph_cap_snap *capsnap =
2703 list_last_entry(&ci->i_cap_snaps,
2704 struct ceph_cap_snap,
2706 capsnap->writing = 0;
2707 if (ceph_try_drop_cap_snap(ci, capsnap))
2709 else if (__ceph_finish_cap_snap(ci, capsnap))
2713 if (ci->i_wrbuffer_ref_head == 0 &&
2714 ci->i_dirty_caps == 0 &&
2715 ci->i_flushing_caps == 0) {
2716 BUG_ON(!ci->i_head_snapc);
2717 ceph_put_snap_context(ci->i_head_snapc);
2718 ci->i_head_snapc = NULL;
2720 /* see comment in __ceph_remove_cap() */
2721 if (!__ceph_is_any_caps(ci) && ci->i_snap_realm)
2722 drop_inode_snap_realm(ci);
2724 spin_unlock(&ci->i_ceph_lock);
2726 dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
2727 last ? " last" : "", put ? " put" : "");
2729 if (last && !flushsnaps)
2730 ceph_check_caps(ci, 0, NULL);
2731 else if (flushsnaps)
2732 ceph_flush_snaps(ci, NULL);
2734 wake_up_all(&ci->i_cap_wq);
2740 * Release @nr WRBUFFER refs on dirty pages for the given @snapc snap
2741 * context. Adjust per-snap dirty page accounting as appropriate.
2742 * Once all dirty data for a cap_snap is flushed, flush snapped file
2743 * metadata back to the MDS. If we dropped the last ref, call ceph_check_caps.
2746 void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
2747 struct ceph_snap_context *snapc)
2749 struct inode *inode = &ci->vfs_inode;
2750 struct ceph_cap_snap *capsnap = NULL;
2754 bool flush_snaps = false;
2755 bool complete_capsnap = false;
2757 spin_lock(&ci->i_ceph_lock);
2758 ci->i_wrbuffer_ref -= nr;
2759 if (ci->i_wrbuffer_ref == 0) {
2764 if (ci->i_head_snapc == snapc) {
2765 ci->i_wrbuffer_ref_head -= nr;
2766 if (ci->i_wrbuffer_ref_head == 0 &&
2767 ci->i_wr_ref == 0 &&
2768 ci->i_dirty_caps == 0 &&
2769 ci->i_flushing_caps == 0) {
2770 BUG_ON(!ci->i_head_snapc);
2771 ceph_put_snap_context(ci->i_head_snapc);
2772 ci->i_head_snapc = NULL;
2774 dout("put_wrbuffer_cap_refs on %p head %d/%d -> %d/%d %s\n",
2776 ci->i_wrbuffer_ref+nr, ci->i_wrbuffer_ref_head+nr,
2777 ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
2778 last ? " LAST" : "");
2780 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
2781 if (capsnap->context == snapc) {
2787 capsnap->dirty_pages -= nr;
2788 if (capsnap->dirty_pages == 0) {
2789 complete_capsnap = true;
2790 if (!capsnap->writing) {
2791 if (ceph_try_drop_cap_snap(ci, capsnap)) {
2794 ci->i_ceph_flags |= CEPH_I_FLUSH_SNAPS;
2799 dout("put_wrbuffer_cap_refs on %p cap_snap %p "
2800 " snap %lld %d/%d -> %d/%d %s%s\n",
2801 inode, capsnap, capsnap->context->seq,
2802 ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr,
2803 ci->i_wrbuffer_ref, capsnap->dirty_pages,
2804 last ? " (wrbuffer last)" : "",
2805 complete_capsnap ? " (complete capsnap)" : "");
2808 spin_unlock(&ci->i_ceph_lock);
2811 ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
2812 } else if (flush_snaps) {
2813 ceph_flush_snaps(ci, NULL);
2815 if (complete_capsnap)
2816 wake_up_all(&ci->i_cap_wq);
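/*
 * Illustrative sketch (not part of the original file): how a writeback
 * completion might return its WRBUFFER refs. The function and parameter
 * names are hypothetical; it assumes one wrbuffer ref was taken per page
 * dirtied under @snapc, plus one snap_context ref held by the request.
 */
static void __maybe_unused example_writepages_done(struct ceph_inode_info *ci,
						   struct ceph_snap_context *snapc,
						   int num_pages)
{
	/* may complete a cap_snap and/or trigger a cap check/flush */
	ceph_put_wrbuffer_cap_refs(ci, num_pages, snapc);
	ceph_put_snap_context(snapc);	/* drop the request's snapc ref */
}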
2822 * Invalidate unlinked inode's aliases, so we can drop the inode ASAP.
2824 static void invalidate_aliases(struct inode *inode)
2826 struct dentry *dn, *prev = NULL;
2828 dout("invalidate_aliases inode %p\n", inode);
2829 d_prune_aliases(inode);
2831 * For a non-directory inode, d_find_alias() only returns
2832 * hashed dentries. After calling d_invalidate(), the
2833 * dentry becomes unhashed.
2835 * For a directory inode, d_find_alias() can return an
2836 * unhashed dentry, but a directory inode should have
2837 * one alias at most.
2839 while ((dn = d_find_alias(inode))) {
2854 * Handle a cap GRANT message from the MDS. (Note that a GRANT may
2855 * actually be a revocation if it specifies a smaller cap set.)
2857 * caller holds s_mutex and i_ceph_lock, we drop both.
2859 static void handle_cap_grant(struct ceph_mds_client *mdsc,
2860 struct inode *inode, struct ceph_mds_caps *grant,
2861 struct ceph_string **pns, u64 inline_version,
2862 void *inline_data, u32 inline_len,
2863 struct ceph_buffer *xattr_buf,
2864 struct ceph_mds_session *session,
2865 struct ceph_cap *cap, int issued)
2866 __releases(ci->i_ceph_lock)
2867 __releases(mdsc->snap_rwsem)
2869 struct ceph_inode_info *ci = ceph_inode(inode);
2870 int mds = session->s_mds;
2871 int seq = le32_to_cpu(grant->seq);
2872 int newcaps = le32_to_cpu(grant->caps);
2873 int used, wanted, dirty;
2874 u64 size = le64_to_cpu(grant->size);
2875 u64 max_size = le64_to_cpu(grant->max_size);
2876 struct timespec mtime, atime, ctime;
2879 bool writeback = false;
2880 bool queue_trunc = false;
2881 bool queue_invalidate = false;
2882 bool deleted_inode = false;
2883 bool fill_inline = false;
2885 dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n",
2886 inode, cap, mds, seq, ceph_cap_string(newcaps));
2887 dout(" size %llu max_size %llu, i_size %llu\n", size, max_size,
2892 * auth mds of the inode changed. we received the cap export message,
2893 * but still haven't received the cap import message. handle_cap_export
2894 * updated the new auth MDS' cap.
2896 * "ceph_seq_cmp(seq, cap->seq) <= 0" means we are processing a message
2897 * that was sent before the cap import message. So don't remove caps.
2899 if (ceph_seq_cmp(seq, cap->seq) <= 0) {
2900 WARN_ON(cap != ci->i_auth_cap);
2901 WARN_ON(cap->cap_id != le64_to_cpu(grant->cap_id));
2903 newcaps |= cap->issued;
2907 * If CACHE is being revoked, and we have no dirty buffers,
2908 * try to invalidate (once). (If there are dirty buffers, we
2909 * will invalidate _after_ writeback.)
2911 if (!S_ISDIR(inode->i_mode) && /* don't invalidate readdir cache */
2912 ((cap->issued & ~newcaps) & CEPH_CAP_FILE_CACHE) &&
2913 (newcaps & CEPH_CAP_FILE_LAZYIO) == 0 &&
2914 !(ci->i_wrbuffer_ref || ci->i_wb_ref)) {
2915 if (try_nonblocking_invalidate(inode)) {
2916 /* there were locked pages; invalidate later
2917 in a separate thread. */
2918 if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
2919 queue_invalidate = true;
2920 ci->i_rdcache_revoking = ci->i_rdcache_gen;
2925 /* side effects now are allowed */
2926 cap->cap_gen = session->s_cap_gen;
2929 __check_cap_issue(ci, cap, newcaps);
2931 if ((newcaps & CEPH_CAP_AUTH_SHARED) &&
2932 (issued & CEPH_CAP_AUTH_EXCL) == 0) {
2933 inode->i_mode = le32_to_cpu(grant->mode);
2934 inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(grant->uid));
2935 inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(grant->gid));
2936 dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
2937 from_kuid(&init_user_ns, inode->i_uid),
2938 from_kgid(&init_user_ns, inode->i_gid));
2941 if ((newcaps & CEPH_CAP_LINK_SHARED) &&
2942 (issued & CEPH_CAP_LINK_EXCL) == 0) {
2943 set_nlink(inode, le32_to_cpu(grant->nlink));
2944 if (inode->i_nlink == 0 &&
2945 (newcaps & (CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL)))
2946 deleted_inode = true;
2949 if ((issued & CEPH_CAP_XATTR_EXCL) == 0 && grant->xattr_len) {
2950 int len = le32_to_cpu(grant->xattr_len);
2951 u64 version = le64_to_cpu(grant->xattr_version);
2953 if (version > ci->i_xattrs.version) {
2954 dout(" got new xattrs v%llu on %p len %d\n",
2955 version, inode, len);
2956 if (ci->i_xattrs.blob)
2957 ceph_buffer_put(ci->i_xattrs.blob);
2958 ci->i_xattrs.blob = ceph_buffer_get(xattr_buf);
2959 ci->i_xattrs.version = version;
2960 ceph_forget_all_cached_acls(inode);
2964 if (newcaps & CEPH_CAP_ANY_RD) {
2965 /* ctime/mtime/atime? */
2966 ceph_decode_timespec(&mtime, &grant->mtime);
2967 ceph_decode_timespec(&atime, &grant->atime);
2968 ceph_decode_timespec(&ctime, &grant->ctime);
2969 ceph_fill_file_time(inode, issued,
2970 le32_to_cpu(grant->time_warp_seq),
2971 &ctime, &mtime, &atime);
2974 if (newcaps & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR)) {
2975 /* file layout may have changed */
2976 s64 old_pool = ci->i_layout.pool_id;
2977 struct ceph_string *old_ns;
2979 ceph_file_layout_from_legacy(&ci->i_layout, &grant->layout);
2980 old_ns = rcu_dereference_protected(ci->i_layout.pool_ns,
2981 lockdep_is_held(&ci->i_ceph_lock));
2982 rcu_assign_pointer(ci->i_layout.pool_ns, *pns);
2984 if (ci->i_layout.pool_id != old_pool || *pns != old_ns)
2985 ci->i_ceph_flags &= ~CEPH_I_POOL_PERM;
2989 /* size/truncate_seq? */
2990 queue_trunc = ceph_fill_file_size(inode, issued,
2991 le32_to_cpu(grant->truncate_seq),
2992 le64_to_cpu(grant->truncate_size),
2994 /* max size increase? */
2995 if (ci->i_auth_cap == cap && max_size != ci->i_max_size) {
2996 dout("max_size %lld -> %llu\n",
2997 ci->i_max_size, max_size);
2998 ci->i_max_size = max_size;
2999 if (max_size >= ci->i_wanted_max_size) {
3000 ci->i_wanted_max_size = 0; /* reset */
3001 ci->i_requested_max_size = 0;
3007 /* check cap bits */
3008 wanted = __ceph_caps_wanted(ci);
3009 used = __ceph_caps_used(ci);
3010 dirty = __ceph_caps_dirty(ci);
3011 dout(" my wanted = %s, used = %s, dirty %s\n",
3012 ceph_cap_string(wanted),
3013 ceph_cap_string(used),
3014 ceph_cap_string(dirty));
3015 if (wanted != le32_to_cpu(grant->wanted)) {
3016 dout("mds wanted %s -> %s\n",
3017 ceph_cap_string(le32_to_cpu(grant->wanted)),
3018 ceph_cap_string(wanted));
3019 /* imported cap may not have correct mds_wanted */
3020 if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT)
3024 /* revocation, grant, or no-op? */
3025 if (cap->issued & ~newcaps) {
3026 int revoking = cap->issued & ~newcaps;
3028 dout("revocation: %s -> %s (revoking %s)\n",
3029 ceph_cap_string(cap->issued),
3030 ceph_cap_string(newcaps),
3031 ceph_cap_string(revoking));
3032 if (revoking & used & CEPH_CAP_FILE_BUFFER)
3033 writeback = true; /* initiate writeback; will delay ack */
3034 else if (revoking == CEPH_CAP_FILE_CACHE &&
3035 (newcaps & CEPH_CAP_FILE_LAZYIO) == 0 &&
3037 ; /* do nothing yet, invalidation will be queued */
3038 else if (cap == ci->i_auth_cap)
3039 check_caps = 1; /* check auth cap only */
3041 check_caps = 2; /* check all caps */
3042 cap->issued = newcaps;
3043 cap->implemented |= newcaps;
3044 } else if (cap->issued == newcaps) {
3045 dout("caps unchanged: %s -> %s\n",
3046 ceph_cap_string(cap->issued), ceph_cap_string(newcaps));
3048 dout("grant: %s -> %s\n", ceph_cap_string(cap->issued),
3049 ceph_cap_string(newcaps));
3050 /* non-auth MDS is revoking the newly granted caps? */
3051 if (cap == ci->i_auth_cap &&
3052 __ceph_caps_revoking_other(ci, cap, newcaps))
3055 cap->issued = newcaps;
3056 cap->implemented |= newcaps; /* add bits only, to
3057 * avoid stepping on a
3058 * pending revocation */
3061 BUG_ON(cap->issued & ~cap->implemented);
3063 if (inline_version > 0 && inline_version >= ci->i_inline_version) {
3064 ci->i_inline_version = inline_version;
3065 if (ci->i_inline_version != CEPH_INLINE_NONE &&
3066 (newcaps & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)))
3070 if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) {
3071 if (newcaps & ~issued)
3073 kick_flushing_inode_caps(mdsc, session, inode);
3074 up_read(&mdsc->snap_rwsem);
3076 spin_unlock(&ci->i_ceph_lock);
3080 ceph_fill_inline_data(inode, NULL, inline_data, inline_len);
3083 ceph_queue_vmtruncate(inode);
3087 * queue inode for writeback: we can't actually call
3088 * filemap_write_and_wait, etc. from message handler context.
3091 ceph_queue_writeback(inode);
3092 if (queue_invalidate)
3093 ceph_queue_invalidate(inode);
3095 invalidate_aliases(inode);
3097 wake_up_all(&ci->i_cap_wq);
3099 if (check_caps == 1)
3100 ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_AUTHONLY,
3102 else if (check_caps == 2)
3103 ceph_check_caps(ci, CHECK_CAPS_NODELAY, session);
3105 mutex_unlock(&session->s_mutex);
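/*
 * Worked example (illustrative): if cap->issued was Fs|Fc and the
 * message carries newcaps = Fs, then revoking = issued & ~newcaps = Fc.
 * With no dirty buffers this is the "invalidation will be queued" case
 * above: the page cache is invalidated first, and only afterwards is
 * the revocation acked back to the MDS via ceph_check_caps().
 */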
3109 * Handle FLUSH_ACK from MDS, indicating that metadata we sent to the
3110 * MDS has been safely committed.
3112 static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
3113 struct ceph_mds_caps *m,
3114 struct ceph_mds_session *session,
3115 struct ceph_cap *cap)
3116 __releases(ci->i_ceph_lock)
3118 struct ceph_inode_info *ci = ceph_inode(inode);
3119 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
3120 struct ceph_cap_flush *cf, *tmp_cf;
3121 LIST_HEAD(to_remove);
3122 unsigned seq = le32_to_cpu(m->seq);
3123 int dirty = le32_to_cpu(m->dirty);
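/*
 * The MDS acks all flushes up to @flush_tid: walk the per-inode flush
 * list, skip capsnap entries (cf->caps == 0), and collect everything
 * with a tid at or below @flush_tid for removal.
 */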
3129 list_for_each_entry_safe(cf, tmp_cf, &ci->i_cap_flush_list, i_list) {
3130 if (cf->tid == flush_tid)
3132 if (cf->caps == 0) /* capsnap */
3134 if (cf->tid <= flush_tid) {
3135 if (__finish_cap_flush(NULL, ci, cf))
3137 list_add_tail(&cf->i_list, &to_remove);
3139 cleaned &= ~cf->caps;
3145 dout("handle_cap_flush_ack inode %p mds%d seq %d on %s cleaned %s,"
3146 " flushing %s -> %s\n",
3147 inode, session->s_mds, seq, ceph_cap_string(dirty),
3148 ceph_cap_string(cleaned), ceph_cap_string(ci->i_flushing_caps),
3149 ceph_cap_string(ci->i_flushing_caps & ~cleaned));
3151 if (list_empty(&to_remove) && !cleaned)
3154 ci->i_flushing_caps &= ~cleaned;
3156 spin_lock(&mdsc->cap_dirty_lock);
3158 list_for_each_entry(cf, &to_remove, i_list) {
3159 if (__finish_cap_flush(mdsc, NULL, cf))
3163 if (ci->i_flushing_caps == 0) {
3164 if (list_empty(&ci->i_cap_flush_list)) {
3165 list_del_init(&ci->i_flushing_item);
3166 if (!list_empty(&session->s_cap_flushing)) {
3167 dout(" mds%d still flushing cap on %p\n",
3169 &list_first_entry(&session->s_cap_flushing,
3170 struct ceph_inode_info,
3171 i_flushing_item)->vfs_inode);
3174 mdsc->num_cap_flushing--;
3175 dout(" inode %p now !flushing\n", inode);
3177 if (ci->i_dirty_caps == 0) {
3178 dout(" inode %p now clean\n", inode);
3179 BUG_ON(!list_empty(&ci->i_dirty_item));
3181 if (ci->i_wr_ref == 0 &&
3182 ci->i_wrbuffer_ref_head == 0) {
3183 BUG_ON(!ci->i_head_snapc);
3184 ceph_put_snap_context(ci->i_head_snapc);
3185 ci->i_head_snapc = NULL;
3188 BUG_ON(list_empty(&ci->i_dirty_item));
3191 spin_unlock(&mdsc->cap_dirty_lock);
3194 spin_unlock(&ci->i_ceph_lock);
3196 while (!list_empty(&to_remove)) {
3197 cf = list_first_entry(&to_remove,
3198 struct ceph_cap_flush, i_list);
3199 list_del(&cf->i_list);
3200 ceph_free_cap_flush(cf);
3204 wake_up_all(&ci->i_cap_wq);
3206 wake_up_all(&mdsc->cap_flushing_wq);
3212 * Handle FLUSHSNAP_ACK. MDS has flushed snap data to disk and we can
3213 * throw away our cap_snap.
3215 * Caller holds s_mutex.
3217 static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
3218 struct ceph_mds_caps *m,
3219 struct ceph_mds_session *session)
3221 struct ceph_inode_info *ci = ceph_inode(inode);
3222 struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
3223 u64 follows = le64_to_cpu(m->snap_follows);
3224 struct ceph_cap_snap *capsnap;
3225 bool flushed = false;
3226 bool wake_ci = false;
3227 bool wake_mdsc = false;
3229 dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n",
3230 inode, ci, session->s_mds, follows);
3232 spin_lock(&ci->i_ceph_lock);
3233 list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
3234 if (capsnap->follows == follows) {
3235 if (capsnap->cap_flush.tid != flush_tid) {
3236 dout(" cap_snap %p follows %lld tid %lld !="
3237 " %lld\n", capsnap, follows,
3238 flush_tid, capsnap->cap_flush.tid);
3244 dout(" skipping cap_snap %p follows %lld\n",
3245 capsnap, capsnap->follows);
3249 WARN_ON(capsnap->dirty_pages || capsnap->writing);
3250 dout(" removing %p cap_snap %p follows %lld\n",
3251 inode, capsnap, follows);
3252 list_del(&capsnap->ci_item);
3253 if (__finish_cap_flush(NULL, ci, &capsnap->cap_flush))
3256 spin_lock(&mdsc->cap_dirty_lock);
3258 if (list_empty(&ci->i_cap_flush_list))
3259 list_del_init(&ci->i_flushing_item);
3261 if (__finish_cap_flush(mdsc, NULL, &capsnap->cap_flush))
3264 spin_unlock(&mdsc->cap_dirty_lock);
3266 spin_unlock(&ci->i_ceph_lock);
3268 ceph_put_snap_context(capsnap->context);
3269 ceph_put_cap_snap(capsnap);
3271 wake_up_all(&ci->i_cap_wq);
3273 wake_up_all(&mdsc->cap_flushing_wq);
3279 * Handle TRUNC from MDS, indicating file truncation.
3281 * caller holds s_mutex.
3283 static void handle_cap_trunc(struct inode *inode,
3284 struct ceph_mds_caps *trunc,
3285 struct ceph_mds_session *session)
3286 __releases(ci->i_ceph_lock)
3288 struct ceph_inode_info *ci = ceph_inode(inode);
3289 int mds = session->s_mds;
3290 int seq = le32_to_cpu(trunc->seq);
3291 u32 truncate_seq = le32_to_cpu(trunc->truncate_seq);
3292 u64 truncate_size = le64_to_cpu(trunc->truncate_size);
3293 u64 size = le64_to_cpu(trunc->size);
3294 int implemented = 0;
3295 int dirty = __ceph_caps_dirty(ci);
3296 int issued = __ceph_caps_issued(ci, &implemented);
3297 int queue_trunc = 0;
3299 issued |= implemented | dirty;
3301 dout("handle_cap_trunc inode %p mds%d seq %d to %lld seq %d\n",
3302 inode, mds, seq, truncate_size, truncate_seq);
3303 queue_trunc = ceph_fill_file_size(inode, issued,
3304 truncate_seq, truncate_size, size);
3305 spin_unlock(&ci->i_ceph_lock);
3308 ceph_queue_vmtruncate(inode);
3312 * Handle EXPORT from MDS. Cap is being migrated _from_ this mds to a
3313 * different one. If we are the most recent migration we've seen (as
3314 * indicated by mseq), make note of the migrating cap bits for the
3315 * duration (until we see the corresponding IMPORT).
3317 * caller holds s_mutex
3319 static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
3320 struct ceph_mds_cap_peer *ph,
3321 struct ceph_mds_session *session)
3323 struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
3324 struct ceph_mds_session *tsession = NULL;
3325 struct ceph_cap *cap, *tcap, *new_cap = NULL;
3326 struct ceph_inode_info *ci = ceph_inode(inode);
3328 unsigned mseq = le32_to_cpu(ex->migrate_seq);
3329 unsigned t_seq, t_mseq;
3331 int mds = session->s_mds;
3334 t_cap_id = le64_to_cpu(ph->cap_id);
3335 t_seq = le32_to_cpu(ph->seq);
3336 t_mseq = le32_to_cpu(ph->mseq);
3337 target = le32_to_cpu(ph->mds);
3339 t_cap_id = t_seq = t_mseq = 0;
3343 dout("handle_cap_export inode %p ci %p mds%d mseq %d target %d\n",
3344 inode, ci, mds, mseq, target);
3346 spin_lock(&ci->i_ceph_lock);
3347 cap = __get_cap_for_mds(ci, mds);
3348 if (!cap || cap->cap_id != le64_to_cpu(ex->cap_id))
3352 __ceph_remove_cap(cap, false);
3353 if (!ci->i_auth_cap)
3354 ci->i_ceph_flags |= CEPH_I_CAP_DROPPED;
3359 * now we know we haven't received the cap import message yet
3360 * because the exported cap still exists.
3363 issued = cap->issued;
3364 WARN_ON(issued != cap->implemented);
3366 tcap = __get_cap_for_mds(ci, target);
3368 /* already have caps from the target */
3369 if (tcap->cap_id != t_cap_id ||
3370 ceph_seq_cmp(tcap->seq, t_seq) < 0) {
3371 dout(" updating import cap %p mds%d\n", tcap, target);
3372 tcap->cap_id = t_cap_id;
3373 tcap->seq = t_seq - 1;
3374 tcap->issue_seq = t_seq - 1;
3375 tcap->issued |= issued;
3376 tcap->implemented |= issued;
3377 if (cap == ci->i_auth_cap)
3378 ci->i_auth_cap = tcap;
3379 if (!list_empty(&ci->i_cap_flush_list) &&
3380 ci->i_auth_cap == tcap) {
3381 spin_lock(&mdsc->cap_dirty_lock);
3382 list_move_tail(&ci->i_flushing_item,
3383 &tcap->session->s_cap_flushing);
3384 spin_unlock(&mdsc->cap_dirty_lock);
3387 __ceph_remove_cap(cap, false);
3389 } else if (tsession) {
3390 /* add placeholder for the export target */
3391 int flag = (cap == ci->i_auth_cap) ? CEPH_CAP_FLAG_AUTH : 0;
3392 ceph_add_cap(inode, tsession, t_cap_id, -1, issued, 0,
3393 t_seq - 1, t_mseq, (u64)-1, flag, &new_cap);
3395 __ceph_remove_cap(cap, false);
3399 spin_unlock(&ci->i_ceph_lock);
3400 mutex_unlock(&session->s_mutex);
3402 /* open target session */
3403 tsession = ceph_mdsc_open_export_target_session(mdsc, target);
3404 if (!IS_ERR(tsession)) {
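/*
 * Take the two session mutexes in a fixed order (by mds rank) to avoid
 * an ABBA deadlock; the inner lock is taken with SINGLE_DEPTH_NESTING
 * so lockdep accepts the nested acquisition.
 */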
3406 mutex_lock(&session->s_mutex);
3407 mutex_lock_nested(&tsession->s_mutex,
3408 SINGLE_DEPTH_NESTING);
3410 mutex_lock(&tsession->s_mutex);
3411 mutex_lock_nested(&session->s_mutex,
3412 SINGLE_DEPTH_NESTING);
3414 new_cap = ceph_get_cap(mdsc, NULL);
3419 mutex_lock(&session->s_mutex);
3424 spin_unlock(&ci->i_ceph_lock);
3425 mutex_unlock(&session->s_mutex);
3427 mutex_unlock(&tsession->s_mutex);
3428 ceph_put_mds_session(tsession);
3431 ceph_put_cap(mdsc, new_cap);
3435 * Handle cap IMPORT.
3437 * caller holds s_mutex. acquires i_ceph_lock
3439 static void handle_cap_import(struct ceph_mds_client *mdsc,
3440 struct inode *inode, struct ceph_mds_caps *im,
3441 struct ceph_mds_cap_peer *ph,
3442 struct ceph_mds_session *session,
3443 struct ceph_cap **target_cap, int *old_issued)
3444 __acquires(ci->i_ceph_lock)
3446 struct ceph_inode_info *ci = ceph_inode(inode);
3447 struct ceph_cap *cap, *ocap, *new_cap = NULL;
3448 int mds = session->s_mds;
3450 unsigned caps = le32_to_cpu(im->caps);
3451 unsigned wanted = le32_to_cpu(im->wanted);
3452 unsigned seq = le32_to_cpu(im->seq);
3453 unsigned mseq = le32_to_cpu(im->migrate_seq);
3454 u64 realmino = le64_to_cpu(im->realm);
3455 u64 cap_id = le64_to_cpu(im->cap_id);
3460 p_cap_id = le64_to_cpu(ph->cap_id);
3461 peer = le32_to_cpu(ph->mds);
3467 dout("handle_cap_import inode %p ci %p mds%d mseq %d peer %d\n",
3468 inode, ci, mds, mseq, peer);
3471 spin_lock(&ci->i_ceph_lock);
3472 cap = __get_cap_for_mds(ci, mds);
3475 spin_unlock(&ci->i_ceph_lock);
3476 new_cap = ceph_get_cap(mdsc, NULL);
3482 ceph_put_cap(mdsc, new_cap);
3487 __ceph_caps_issued(ci, &issued);
3488 issued |= __ceph_caps_dirty(ci);
3490 ceph_add_cap(inode, session, cap_id, -1, caps, wanted, seq, mseq,
3491 realmino, CEPH_CAP_FLAG_AUTH, &new_cap);
3493 ocap = peer >= 0 ? __get_cap_for_mds(ci, peer) : NULL;
3494 if (ocap && ocap->cap_id == p_cap_id) {
3495 dout(" remove export cap %p mds%d flags %d\n",
3496 ocap, peer, ph->flags);
3497 if ((ph->flags & CEPH_CAP_FLAG_AUTH) &&
3498 (ocap->seq != le32_to_cpu(ph->seq) ||
3499 ocap->mseq != le32_to_cpu(ph->mseq))) {
3500 pr_err("handle_cap_import: mismatched seq/mseq: "
3501 "ino (%llx.%llx) mds%d seq %d mseq %d "
3502 "importer mds%d has peer seq %d mseq %d\n",
3503 ceph_vinop(inode), peer, ocap->seq,
3504 ocap->mseq, mds, le32_to_cpu(ph->seq),
3505 le32_to_cpu(ph->mseq));
3507 __ceph_remove_cap(ocap, (ph->flags & CEPH_CAP_FLAG_RELEASE));
3510 /* make sure we re-request max_size, if necessary */
3511 ci->i_wanted_max_size = 0;
3512 ci->i_requested_max_size = 0;
3514 *old_issued = issued;
3519 * Handle a caps message from the MDS.
3521 * Identify the appropriate session, inode, and call the right handler
3522 * based on the cap op.
3524 void ceph_handle_caps(struct ceph_mds_session *session,
3525 struct ceph_msg *msg)
3527 struct ceph_mds_client *mdsc = session->s_mdsc;
3528 struct super_block *sb = mdsc->fsc->sb;
3529 struct inode *inode;
3530 struct ceph_inode_info *ci;
3531 struct ceph_cap *cap;
3532 struct ceph_mds_caps *h;
3533 struct ceph_mds_cap_peer *peer = NULL;
3534 struct ceph_snap_realm *realm = NULL;
3535 struct ceph_string *pool_ns = NULL;
3536 int mds = session->s_mds;
3539 struct ceph_vino vino;
3541 u64 inline_version = 0;
3542 void *inline_data = NULL;
3545 size_t snaptrace_len;
3548 dout("handle_caps from mds%d\n", mds);
3551 end = msg->front.iov_base + msg->front.iov_len;
3552 tid = le64_to_cpu(msg->hdr.tid);
3553 if (msg->front.iov_len < sizeof(*h))
3555 h = msg->front.iov_base;
3556 op = le32_to_cpu(h->op);
3557 vino.ino = le64_to_cpu(h->ino);
3558 vino.snap = CEPH_NOSNAP;
3559 seq = le32_to_cpu(h->seq);
3560 mseq = le32_to_cpu(h->migrate_seq);
3563 snaptrace_len = le32_to_cpu(h->snap_trace_len);
3564 p = snaptrace + snaptrace_len;
3566 if (le16_to_cpu(msg->hdr.version) >= 2) {
3568 ceph_decode_32_safe(&p, end, flock_len, bad);
3569 if (p + flock_len > end)
3574 if (le16_to_cpu(msg->hdr.version) >= 3) {
3575 if (op == CEPH_CAP_OP_IMPORT) {
3576 if (p + sizeof(*peer) > end)
3580 } else if (op == CEPH_CAP_OP_EXPORT) {
3581 /* recorded in unused fields */
3582 peer = (void *)&h->size;
3586 if (le16_to_cpu(msg->hdr.version) >= 4) {
3587 ceph_decode_64_safe(&p, end, inline_version, bad);
3588 ceph_decode_32_safe(&p, end, inline_len, bad);
3589 if (p + inline_len > end)
3595 if (le16_to_cpu(msg->hdr.version) >= 8) {
3597 u32 caller_uid, caller_gid;
3598 u32 osd_epoch_barrier;
3601 ceph_decode_32_safe(&p, end, osd_epoch_barrier, bad);
3603 ceph_decode_64_safe(&p, end, flush_tid, bad);
3605 ceph_decode_32_safe(&p, end, caller_uid, bad);
3606 ceph_decode_32_safe(&p, end, caller_gid, bad);
3608 ceph_decode_32_safe(&p, end, pool_ns_len, bad);
3609 if (pool_ns_len > 0) {
3610 ceph_decode_need(&p, end, pool_ns_len, bad);
3611 pool_ns = ceph_find_or_create_string(p, pool_ns_len);
3617 inode = ceph_find_inode(sb, vino);
3618 ci = ceph_inode(inode);
3619 dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino,
3622 mutex_lock(&session->s_mutex);
3624 dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq,
3628 dout(" i don't have ino %llx\n", vino.ino);
3630 if (op == CEPH_CAP_OP_IMPORT) {
3631 cap = ceph_get_cap(mdsc, NULL);
3632 cap->cap_ino = vino.ino;
3633 cap->queue_release = 1;
3634 cap->cap_id = le64_to_cpu(h->cap_id);
3637 spin_lock(&session->s_cap_lock);
3638 list_add_tail(&cap->session_caps,
3639 &session->s_cap_releases);
3640 session->s_num_cap_releases++;
3641 spin_unlock(&session->s_cap_lock);
3643 goto flush_cap_releases;
3646 /* these will work even if we don't have a cap yet */
3648 case CEPH_CAP_OP_FLUSHSNAP_ACK:
3649 handle_cap_flushsnap_ack(inode, tid, h, session);
3652 case CEPH_CAP_OP_EXPORT:
3653 handle_cap_export(inode, h, peer, session);
3656 case CEPH_CAP_OP_IMPORT:
3658 if (snaptrace_len) {
3659 down_write(&mdsc->snap_rwsem);
3660 ceph_update_snap_trace(mdsc, snaptrace,
3661 snaptrace + snaptrace_len,
3663 downgrade_write(&mdsc->snap_rwsem);
3665 down_read(&mdsc->snap_rwsem);
3667 handle_cap_import(mdsc, inode, h, peer, session,
3669 handle_cap_grant(mdsc, inode, h, &pool_ns,
3670 inline_version, inline_data, inline_len,
3671 msg->middle, session, cap, issued);
3673 ceph_put_snap_realm(mdsc, realm);
3677 /* the rest require a cap */
3678 spin_lock(&ci->i_ceph_lock);
3679 cap = __get_cap_for_mds(ci, mds);
3681 dout(" no cap on %p ino %llx.%llx from mds%d\n",
3682 inode, ceph_ino(inode), ceph_snap(inode), mds);
3683 spin_unlock(&ci->i_ceph_lock);
3684 goto flush_cap_releases;
3687 /* note that each of these drops i_ceph_lock for us */
3689 case CEPH_CAP_OP_REVOKE:
3690 case CEPH_CAP_OP_GRANT:
3691 __ceph_caps_issued(ci, &issued);
3692 issued |= __ceph_caps_dirty(ci);
3693 handle_cap_grant(mdsc, inode, h, &pool_ns,
3694 inline_version, inline_data, inline_len,
3695 msg->middle, session, cap, issued);
3698 case CEPH_CAP_OP_FLUSH_ACK:
3699 handle_cap_flush_ack(inode, tid, h, session, cap);
3702 case CEPH_CAP_OP_TRUNC:
3703 handle_cap_trunc(inode, h, session);
3707 spin_unlock(&ci->i_ceph_lock);
3708 pr_err("ceph_handle_caps: unknown cap op %d %s\n", op,
3709 ceph_cap_op_name(op));
3716 * send any cap release message to try to move things
3717 * along for the mds (who clearly thinks we still have this cap).
3720 ceph_send_cap_releases(mdsc, session);
3723 mutex_unlock(&session->s_mutex);
3726 ceph_put_string(pool_ns);
3730 pr_err("ceph_handle_caps: corrupt message\n");
3736 * Delayed work handler to process the end of the delayed cap release LRU list.
3738 void ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
3740 struct ceph_inode_info *ci;
3741 int flags = CHECK_CAPS_NODELAY;
3743 dout("check_delayed_caps\n");
3745 spin_lock(&mdsc->cap_delay_lock);
3746 if (list_empty(&mdsc->cap_delay_list))
3748 ci = list_first_entry(&mdsc->cap_delay_list,
3749 struct ceph_inode_info,
3751 if ((ci->i_ceph_flags & CEPH_I_FLUSH) == 0 &&
3752 time_before(jiffies, ci->i_hold_caps_max))
3754 list_del_init(&ci->i_cap_delay_list);
3755 spin_unlock(&mdsc->cap_delay_lock);
3756 dout("check_delayed_caps on %p\n", &ci->vfs_inode);
3757 ceph_check_caps(ci, flags, NULL);
3759 spin_unlock(&mdsc->cap_delay_lock);
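/*
 * Note (illustrative): this is normally driven from the mds_client
 * delayed-work timer. Inodes flagged CEPH_I_FLUSH, or whose
 * i_hold_caps_max deadline has passed, get ceph_check_caps() with
 * CHECK_CAPS_NODELAY; the rest stay queued for a later pass.
 */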
3763 * Flush all dirty caps to the mds
3765 void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
3767 struct ceph_inode_info *ci;
3768 struct inode *inode;
3770 dout("flush_dirty_caps\n");
3771 spin_lock(&mdsc->cap_dirty_lock);
3772 while (!list_empty(&mdsc->cap_dirty)) {
3773 ci = list_first_entry(&mdsc->cap_dirty, struct ceph_inode_info,
3775 inode = &ci->vfs_inode;
3777 dout("flush_dirty_caps %p\n", inode);
3778 spin_unlock(&mdsc->cap_dirty_lock);
3779 ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH, NULL);
3781 spin_lock(&mdsc->cap_dirty_lock);
3783 spin_unlock(&mdsc->cap_dirty_lock);
3784 dout("flush_dirty_caps done\n");
3787 void __ceph_get_fmode(struct ceph_inode_info *ci, int fmode)
3790 int bits = (fmode << 1) | 1;
3791 for (i = 0; i < CEPH_FILE_MODE_BITS; i++) {
3792 if (bits & (1 << i))
3793 ci->i_nr_by_mode[i]++;
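/*
 * Worked example (illustrative): the low bit pins the inode, and the
 * fmode shifts into per-mode slots. For CEPH_FILE_MODE_RDWR (3),
 * bits = (3 << 1) | 1 = 0b111, so i_nr_by_mode[0] (pin), [1] (rd) and
 * [2] (wr) are all incremented; CEPH_FILE_MODE_LAZY (4) gives
 * bits = 0b1001, bumping only [0] and [3].
 */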
3798 * Drop open file reference. If we were the last open file,
3799 * we may need to release capabilities to the MDS (or schedule
3800 * their delayed release).
3802 void ceph_put_fmode(struct ceph_inode_info *ci, int fmode)
3805 int bits = (fmode << 1) | 1;
3806 spin_lock(&ci->i_ceph_lock);
3807 for (i = 0; i < CEPH_FILE_MODE_BITS; i++) {
3808 if (bits & (1 << i)) {
3809 BUG_ON(ci->i_nr_by_mode[i] == 0);
3810 if (--ci->i_nr_by_mode[i] == 0)
3814 dout("put_fmode %p fmode %d {%d,%d,%d,%d}\n",
3815 &ci->vfs_inode, fmode,
3816 ci->i_nr_by_mode[0], ci->i_nr_by_mode[1],
3817 ci->i_nr_by_mode[2], ci->i_nr_by_mode[3]);
3818 spin_unlock(&ci->i_ceph_lock);
3820 if (last && ci->i_vino.snap == CEPH_NOSNAP)
3821 ceph_check_caps(ci, 0, NULL);
3825 * Helpers for embedding cap and dentry lease releases into mds requests.
3828 * @force is used by dentry_release (below) to force inclusion of a
3829 * record for the directory inode, even when there aren't any caps to drop.
3832 int ceph_encode_inode_release(void **p, struct inode *inode,
3833 int mds, int drop, int unless, int force)
3835 struct ceph_inode_info *ci = ceph_inode(inode);
3836 struct ceph_cap *cap;
3837 struct ceph_mds_request_release *rel = *p;
3841 spin_lock(&ci->i_ceph_lock);
3842 used = __ceph_caps_used(ci);
3843 dirty = __ceph_caps_dirty(ci);
3845 dout("encode_inode_release %p mds%d used|dirty %s drop %s unless %s\n",
3846 inode, mds, ceph_cap_string(used|dirty), ceph_cap_string(drop),
3847 ceph_cap_string(unless));
3849 /* only drop unused, clean caps */
3850 drop &= ~(used | dirty);
3852 cap = __get_cap_for_mds(ci, mds);
3853 if (cap && __cap_is_valid(cap)) {
3855 ((cap->issued & drop) &&
3856 (cap->issued & unless) == 0)) {
3857 if ((cap->issued & drop) &&
3858 (cap->issued & unless) == 0) {
3859 int wanted = __ceph_caps_wanted(ci);
3860 if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0)
3861 wanted |= cap->mds_wanted;
3862 dout("encode_inode_release %p cap %p "
3863 "%s -> %s, wanted %s -> %s\n", inode, cap,
3864 ceph_cap_string(cap->issued),
3865 ceph_cap_string(cap->issued & ~drop),
3866 ceph_cap_string(cap->mds_wanted),
3867 ceph_cap_string(wanted));
3869 cap->issued &= ~drop;
3870 cap->implemented &= ~drop;
3871 cap->mds_wanted = wanted;
3873 dout("encode_inode_release %p cap %p %s"
3874 " (force)\n", inode, cap,
3875 ceph_cap_string(cap->issued));
3878 rel->ino = cpu_to_le64(ceph_ino(inode));
3879 rel->cap_id = cpu_to_le64(cap->cap_id);
3880 rel->seq = cpu_to_le32(cap->seq);
3881 rel->issue_seq = cpu_to_le32(cap->issue_seq);
3882 rel->mseq = cpu_to_le32(cap->mseq);
3883 rel->caps = cpu_to_le32(cap->implemented);
3884 rel->wanted = cpu_to_le32(cap->mds_wanted);
3890 dout("encode_inode_release %p cap %p %s\n",
3891 inode, cap, ceph_cap_string(cap->issued));
3894 spin_unlock(&ci->i_ceph_lock);
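/*
 * Illustrative sketch (not part of the original file): how an MDS
 * request builder might embed an inode release record. The drop/unless
 * masks are just an example; real callers pass the masks recorded in
 * the pending request, and @p must point into reserved message space.
 */
static void __maybe_unused example_encode_release(void **p, struct inode *inode,
						  int mds)
{
	/* drop our cached Fs unless we also hold Fx */
	int num = ceph_encode_inode_release(p, inode, mds,
					    CEPH_CAP_FILE_SHARED,
					    CEPH_CAP_FILE_EXCL, 0);

	dout("example encoded %d release record(s)\n", num);
}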
3898 int ceph_encode_dentry_release(void **p, struct dentry *dentry,
3899 int mds, int drop, int unless)
3901 struct inode *dir = d_inode(dentry->d_parent);
3902 struct ceph_mds_request_release *rel = *p;
3903 struct ceph_dentry_info *di = ceph_dentry(dentry);
3908 * force a record for the directory caps if we have a dentry lease.
3909 * this is racy (can't take i_ceph_lock and d_lock together), but it
3910 * doesn't have to be perfect; the mds will revoke anything we don't drop.
3913 spin_lock(&dentry->d_lock);
3914 if (di->lease_session && di->lease_session->s_mds == mds)
3916 spin_unlock(&dentry->d_lock);
3918 ret = ceph_encode_inode_release(p, dir, mds, drop, unless, force);
3920 spin_lock(&dentry->d_lock);
3921 if (ret && di->lease_session && di->lease_session->s_mds == mds) {
3922 dout("encode_dentry_release %p mds%d seq %d\n",
3923 dentry, mds, (int)di->lease_seq);
3924 rel->dname_len = cpu_to_le32(dentry->d_name.len);
3925 memcpy(*p, dentry->d_name.name, dentry->d_name.len);
3926 *p += dentry->d_name.len;
3927 rel->dname_seq = cpu_to_le32(di->lease_seq);
3928 __ceph_mdsc_drop_dentry_lease(dentry);
3930 spin_unlock(&dentry->d_lock);