// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>
#include <linux/ceph/striper.h>

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/iversion.h>
#include <linux/ktime.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include "io.h"
#include "metric.h"
static __le32 ceph_flags_sys2wire(u32 flags)
{
	u32 wire_flags = 0;

	switch (flags & O_ACCMODE) {
	case O_RDONLY:
		wire_flags |= CEPH_O_RDONLY;
		break;
	case O_WRONLY:
		wire_flags |= CEPH_O_WRONLY;
		break;
	case O_RDWR:
		wire_flags |= CEPH_O_RDWR;
		break;
	}
	flags &= ~O_ACCMODE;

#define ceph_sys2wire(a) if (flags & a) { wire_flags |= CEPH_##a; flags &= ~a; }
	ceph_sys2wire(O_CREAT);
	ceph_sys2wire(O_EXCL);
	ceph_sys2wire(O_TRUNC);
	ceph_sys2wire(O_DIRECTORY);
	ceph_sys2wire(O_NOFOLLOW);
#undef ceph_sys2wire

	if (flags)
		dout("unused open flags: %x\n", flags);

	return cpu_to_le32(wire_flags);
}
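/*
 * For illustration only (expansion of the helper macro above):
 * ceph_sys2wire(O_CREAT) expands to
 *
 *	if (flags & O_CREAT) { wire_flags |= CEPH_O_CREAT; flags &= ~O_CREAT; }
 *
 * i.e. each recognized flag is translated to its wire encoding and then
 * cleared, so only untranslated bits survive to trigger the
 * "unused open flags" message above.
 */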
/*
 * Ceph file operations
 *
 * Implement basic open/close functionality, and implement
 * read/write.
 *
 * We implement three modes of file I/O:
 *  - buffered uses the generic_file_aio_{read,write} helpers
 *
 *  - synchronous is used when there is multi-client read/write
 *    sharing, avoids the page cache, and synchronously waits for an
 *    acknowledgement from the osd.
 *
 *  - direct io takes the variant of the sync path that references
 *    user pages directly.
 *
 * fsync() flushes and waits on dirty pages, but just queues metadata
 * for writeback: since the MDS can recover size and mtime there is no
 * need to wait for MDS acknowledgement.
 */
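/*
 * A rough sketch of how the mode is chosen (simplified from
 * ceph_read_iter() further down; error handling omitted):
 *
 *	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
 *	    (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC))
 *		take the sync or direct path;
 *	else
 *		use generic_file_read_iter() and the page cache;
 */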
/*
 * How many pages to get in one call to iov_iter_get_pages().  This
 * determines the size of the on-stack array used as a buffer.
 */
#define ITER_GET_BVECS_PAGES	64
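/*
 * With 64 entries, the on-stack page array in __iter_get_bvecs() costs
 * 64 * sizeof(struct page *) = 512 bytes of stack on a 64-bit kernel.
 */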
static ssize_t __iter_get_bvecs(struct iov_iter *iter, size_t maxsize,
				struct bio_vec *bvecs)
{
	size_t size = 0;
	int bvec_idx = 0;

	if (maxsize > iov_iter_count(iter))
		maxsize = iov_iter_count(iter);

	while (size < maxsize) {
		struct page *pages[ITER_GET_BVECS_PAGES];
		ssize_t bytes;
		size_t start;
		int idx = 0;

		bytes = iov_iter_get_pages(iter, pages, maxsize - size,
					   ITER_GET_BVECS_PAGES, &start);
		if (bytes < 0)
			return size ?: bytes;
		iov_iter_advance(iter, bytes);
		size += bytes;
		for ( ; bytes; idx++, bvec_idx++) {
			struct bio_vec bv = {
				.bv_page = pages[idx],
				.bv_len = min_t(int, bytes, PAGE_SIZE - start),
				.bv_offset = start,
			};

			bvecs[bvec_idx] = bv;
			bytes -= bv.bv_len;
			start = 0;
		}
	}
	return size;
}
/*
 * iov_iter_get_pages() only considers one iov_iter segment, no matter
 * what maxsize or maxpages are given.  For ITER_BVEC that is a single
 * page.
 *
 * Attempt to get up to @maxsize bytes worth of pages from @iter.
 * Return the number of bytes in the created bio_vec array, or an error.
 */
static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize,
				    struct bio_vec **bvecs, int *num_bvecs)
{
	struct bio_vec *bv;
	size_t orig_count = iov_iter_count(iter);
	ssize_t bytes;
	int npages;

	iov_iter_truncate(iter, maxsize);
	npages = iov_iter_npages(iter, INT_MAX);
	iov_iter_reexpand(iter, orig_count);

	/*
	 * __iter_get_bvecs() may populate only part of the array -- zero it
	 * out.
	 */
	bv = kvmalloc_array(npages, sizeof(*bv), GFP_KERNEL | __GFP_ZERO);
	if (!bv)
		return -ENOMEM;

	bytes = __iter_get_bvecs(iter, maxsize, bv);
	if (bytes < 0) {
		/*
		 * No pages were pinned -- just free the array.
		 */
		kvfree(bv);
		return bytes;
	}

	*bvecs = bv;
	*num_bvecs = npages;
	return bytes;
}
static void put_bvecs(struct bio_vec *bvecs, int num_bvecs, bool should_dirty)
{
	int i;

	for (i = 0; i < num_bvecs; i++) {
		if (bvecs[i].bv_page) {
			if (should_dirty)
				set_page_dirty_lock(bvecs[i].bv_page);
			put_page(bvecs[i].bv_page);
		}
	}
	kvfree(bvecs);
}
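/*
 * Sketch of the intended pairing (error handling elided); this mirrors
 * what ceph_direct_read_write() below actually does:
 *
 *	len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
 *	osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);
 *	... submit the request and wait ...
 *	put_bvecs(bvecs, num_pages, should_dirty);
 *
 * with should_dirty true only for reads landing in user memory.
 */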
/*
 * Prepare an open request.  Preallocate ceph_cap to avoid an
 * inopportune ENOMEM later.
 */
static struct ceph_mds_request *
prepare_open_request(struct super_block *sb, int flags, int create_mode)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(sb);
	struct ceph_mds_request *req;
	int want_auth = USE_ANY_MDS;
	int op = (flags & O_CREAT) ? CEPH_MDS_OP_CREATE : CEPH_MDS_OP_OPEN;

	if (flags & (O_WRONLY|O_RDWR|O_CREAT|O_TRUNC))
		want_auth = USE_AUTH_MDS;

	req = ceph_mdsc_create_request(mdsc, op, want_auth);
	if (IS_ERR(req))
		goto out;
	req->r_fmode = ceph_flags_to_mode(flags);
	req->r_args.open.flags = ceph_flags_sys2wire(flags);
	req->r_args.open.mode = cpu_to_le32(create_mode);
out:
	return req;
}
static int ceph_init_file_info(struct inode *inode, struct file *file,
					int fmode, bool isdir)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mount_options *opt =
		ceph_inode_to_client(&ci->netfs.inode)->mount_options;
	struct ceph_file_info *fi;
	int ret;

	dout("%s %p %p 0%o (%s)\n", __func__, inode, file,
			inode->i_mode, isdir ? "dir" : "regular");
	BUG_ON(inode->i_fop->release != ceph_release);

	if (isdir) {
		struct ceph_dir_file_info *dfi =
			kmem_cache_zalloc(ceph_dir_file_cachep, GFP_KERNEL);
		if (!dfi)
			return -ENOMEM;

		file->private_data = dfi;
		fi = &dfi->file_info;
		dfi->next_offset = 2;
		dfi->readdir_cache_idx = -1;
	} else {
		fi = kmem_cache_zalloc(ceph_file_cachep, GFP_KERNEL);
		if (!fi)
			return -ENOMEM;

		if (opt->flags & CEPH_MOUNT_OPT_NOPAGECACHE)
			fi->flags |= CEPH_F_SYNC;

		file->private_data = fi;
	}

	ceph_get_fmode(ci, fmode, 1);
	fi->fmode = fmode;

	spin_lock_init(&fi->rw_contexts_lock);
	INIT_LIST_HEAD(&fi->rw_contexts);
	fi->filp_gen = READ_ONCE(ceph_inode_to_client(inode)->filp_gen);

	if ((file->f_mode & FMODE_WRITE) &&
	    ci->i_inline_version != CEPH_INLINE_NONE) {
		ret = ceph_uninline_data(file);
		if (ret < 0)
			goto error;
	}

	return 0;

error:
	ceph_fscache_unuse_cookie(inode, file->f_mode & FMODE_WRITE);
	ceph_put_fmode(ci, fi->fmode, 1);
	kmem_cache_free(ceph_file_cachep, fi);
	/* wake up anyone waiting for caps on this inode */
	wake_up_all(&ci->i_cap_wq);
	return ret;
}
/*
 * initialize private struct file data.
 * if we fail, clean up by dropping fmode reference on the ceph_inode
 */
static int ceph_init_file(struct inode *inode, struct file *file, int fmode)
{
	int ret = 0;

	switch (inode->i_mode & S_IFMT) {
	case S_IFREG:
		ceph_fscache_use_cookie(inode, file->f_mode & FMODE_WRITE);
		fallthrough;
	case S_IFDIR:
		ret = ceph_init_file_info(inode, file, fmode,
						S_ISDIR(inode->i_mode));
		break;
	case S_IFLNK:
		dout("init_file %p %p 0%o (symlink)\n", inode, file,
		     inode->i_mode);
		break;
	default:
		dout("init_file %p %p 0%o (special)\n", inode, file,
		     inode->i_mode);
		/*
		 * we need to drop the open ref now, since we don't
		 * have .release set to ceph_release.
		 */
		BUG_ON(inode->i_fop->release == ceph_release);
		/* call the proper open fop */
		ret = inode->i_fop->open(inode, file);
	}
	return ret;
}
/*
 * try to renew caps after session gets killed.
 */
int ceph_renew_caps(struct inode *inode, int fmode)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(inode->i_sb);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_request *req;
	int err, flags, wanted;

	spin_lock(&ci->i_ceph_lock);
	__ceph_touch_fmode(ci, mdsc, fmode);
	wanted = __ceph_caps_file_wanted(ci);
	if (__ceph_is_any_real_caps(ci) &&
	    (!(wanted & CEPH_CAP_ANY_WR) || ci->i_auth_cap)) {
		int issued = __ceph_caps_issued(ci, NULL);
		spin_unlock(&ci->i_ceph_lock);
		dout("renew caps %p want %s issued %s updating mds_wanted\n",
		     inode, ceph_cap_string(wanted), ceph_cap_string(issued));
		ceph_check_caps(ci, 0, NULL);
		return 0;
	}
	spin_unlock(&ci->i_ceph_lock);

	flags = 0;
	if ((wanted & CEPH_CAP_FILE_RD) && (wanted & CEPH_CAP_FILE_WR))
		flags = O_RDWR;
	else if (wanted & CEPH_CAP_FILE_RD)
		flags = O_RDONLY;
	else if (wanted & CEPH_CAP_FILE_WR)
		flags = O_WRONLY;
#ifdef O_LAZY
	if (wanted & CEPH_CAP_FILE_LAZYIO)
		flags |= O_LAZY;
#endif

	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}

	req->r_inode = inode;
	ihold(inode);
	req->r_num_caps = 1;

	err = ceph_mdsc_do_request(mdsc, NULL, req);
	ceph_mdsc_put_request(req);
out:
	dout("renew caps %p open result=%d\n", inode, err);
	return err < 0 ? err : 0;
}
/*
 * If we already have the requisite capabilities, we can satisfy
 * the open request locally (no need to request new caps from the
 * MDS).  We do, however, need to inform the MDS (asynchronously)
 * if our wanted caps set expands.
 */
int ceph_open(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_sb_to_client(inode->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct ceph_file_info *fi = file->private_data;
	int err;
	int flags, fmode, wanted;

	if (fi) {
		dout("open file %p is already opened\n", file);
		return 0;
	}

	/* filter out O_CREAT|O_EXCL; vfs did that already.  yuck. */
	flags = file->f_flags & ~(O_CREAT|O_EXCL);
	if (S_ISDIR(inode->i_mode))
		flags = O_DIRECTORY;  /* mds likes to know */

	dout("open inode %p ino %llx.%llx file %p flags %d (%d)\n", inode,
	     ceph_vinop(inode), file, flags, file->f_flags);
	fmode = ceph_flags_to_mode(flags);
	wanted = ceph_caps_for_mode(fmode);

	/* snapped files are read-only */
	if (ceph_snap(inode) != CEPH_NOSNAP && (file->f_mode & FMODE_WRITE))
		return -EROFS;

	/* trivially open snapdir */
	if (ceph_snap(inode) == CEPH_SNAPDIR) {
		return ceph_init_file(inode, file, fmode);
	}

	/*
	 * No need to block if we have caps on the auth MDS (for
	 * write) or any MDS (for read).  Update wanted set
	 * asynchronously.
	 */
	spin_lock(&ci->i_ceph_lock);
	if (__ceph_is_any_real_caps(ci) &&
	    (((fmode & CEPH_FILE_MODE_WR) == 0) || ci->i_auth_cap)) {
		int mds_wanted = __ceph_caps_mds_wanted(ci, true);
		int issued = __ceph_caps_issued(ci, NULL);

		dout("open %p fmode %d want %s issued %s using existing\n",
		     inode, fmode, ceph_cap_string(wanted),
		     ceph_cap_string(issued));
		__ceph_touch_fmode(ci, mdsc, fmode);
		spin_unlock(&ci->i_ceph_lock);

		/* adjust wanted? */
		if ((issued & wanted) != wanted &&
		    (mds_wanted & wanted) != wanted &&
		    ceph_snap(inode) != CEPH_SNAPDIR)
			ceph_check_caps(ci, 0, NULL);

		return ceph_init_file(inode, file, fmode);
	} else if (ceph_snap(inode) != CEPH_NOSNAP &&
		   (ci->i_snap_caps & wanted) == wanted) {
		__ceph_touch_fmode(ci, mdsc, fmode);
		spin_unlock(&ci->i_ceph_lock);
		return ceph_init_file(inode, file, fmode);
	}

	spin_unlock(&ci->i_ceph_lock);

	dout("open fmode %d wants %s\n", fmode, ceph_cap_string(wanted));
	req = prepare_open_request(inode->i_sb, flags, 0);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out;
	}
	req->r_inode = inode;
	ihold(inode);

	req->r_num_caps = 1;
	err = ceph_mdsc_do_request(mdsc, NULL, req);
	if (!err)
		err = ceph_init_file(inode, file, req->r_fmode);
	ceph_mdsc_put_request(req);
	dout("open result=%d on %llx.%llx\n", err, ceph_vinop(inode));
out:
	return err;
}
/* Clone the layout from a synchronous create, if the dir now has Dc caps */
static void
cache_file_layout(struct inode *dst, struct inode *src)
{
	struct ceph_inode_info *cdst = ceph_inode(dst);
	struct ceph_inode_info *csrc = ceph_inode(src);

	spin_lock(&cdst->i_ceph_lock);
	if ((__ceph_caps_issued(cdst, NULL) & CEPH_CAP_DIR_CREATE) &&
	    !ceph_file_layout_is_valid(&cdst->i_cached_layout)) {
		memcpy(&cdst->i_cached_layout, &csrc->i_layout,
			sizeof(cdst->i_cached_layout));
		rcu_assign_pointer(cdst->i_cached_layout.pool_ns,
				   ceph_try_get_string(csrc->i_layout.pool_ns));
	}
	spin_unlock(&cdst->i_ceph_lock);
}
/*
 * Try to set up an async create. We need caps, a file layout, and inode number,
 * and either a lease on the dentry or complete dir info. If any of those
 * criteria are not satisfied, then return false and the caller can go
 * synchronous.
 */
static int try_prep_async_create(struct inode *dir, struct dentry *dentry,
				 struct ceph_file_layout *lo, u64 *pino)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int got = 0, want = CEPH_CAP_FILE_EXCL | CEPH_CAP_DIR_CREATE;
	u64 ino;

	spin_lock(&ci->i_ceph_lock);
	/* No auth cap means no chance for Dc caps */
	if (!ci->i_auth_cap)
		goto no_async;

	/* Any delegated inos? */
	if (xa_empty(&ci->i_auth_cap->session->s_delegated_inos))
		goto no_async;

	if (!ceph_file_layout_is_valid(&ci->i_cached_layout))
		goto no_async;

	if ((__ceph_caps_issued(ci, NULL) & want) != want)
		goto no_async;

	if (d_in_lookup(dentry)) {
		if (!__ceph_dir_is_complete(ci))
			goto no_async;
		spin_lock(&dentry->d_lock);
		di->lease_shared_gen = atomic_read(&ci->i_shared_gen);
		spin_unlock(&dentry->d_lock);
	} else if (atomic_read(&ci->i_shared_gen) !=
		   READ_ONCE(di->lease_shared_gen)) {
		goto no_async;
	}

	ino = ceph_get_deleg_ino(ci->i_auth_cap->session);
	if (!ino)
		goto no_async;

	*pino = ino;
	ceph_take_cap_refs(ci, want, false);
	memcpy(lo, &ci->i_cached_layout, sizeof(*lo));
	rcu_assign_pointer(lo->pool_ns,
			   ceph_try_get_string(ci->i_cached_layout.pool_ns));
	got = want;
no_async:
	spin_unlock(&ci->i_ceph_lock);
	return got;
}
static void restore_deleg_ino(struct inode *dir, u64 ino)
{
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct ceph_mds_session *s = NULL;

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_auth_cap)
		s = ceph_get_mds_session(ci->i_auth_cap->session);
	spin_unlock(&ci->i_ceph_lock);
	if (s) {
		int err = ceph_restore_deleg_ino(s, ino);
		if (err)
			pr_warn("ceph: unable to restore delegated ino 0x%llx to session: %d\n",
				ino, err);
		ceph_put_mds_session(s);
	}
}
static void wake_async_create_waiters(struct inode *inode,
				      struct ceph_mds_session *session)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	spin_lock(&ci->i_ceph_lock);
	if (ci->i_ceph_flags & CEPH_I_ASYNC_CREATE) {
		ci->i_ceph_flags &= ~CEPH_I_ASYNC_CREATE;
		wake_up_bit(&ci->i_ceph_flags, CEPH_ASYNC_CREATE_BIT);
	}
	ceph_kick_flushing_inode_caps(session, ci);
	spin_unlock(&ci->i_ceph_lock);
}
static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
                                 struct ceph_mds_request *req)
{
	struct dentry *dentry = req->r_dentry;
	struct inode *dinode = d_inode(dentry);
	struct inode *tinode = req->r_target_inode;
	int result = req->r_err ? req->r_err :
			le32_to_cpu(req->r_reply_info.head->result);

	WARN_ON_ONCE(dinode && tinode && dinode != tinode);

	/* MDS changed -- caller must resubmit */
	if (result == -EJUKEBOX)
		goto out;

	mapping_set_error(req->r_parent->i_mapping, result);

	if (result) {
		int pathlen = 0;
		u64 base = 0;
		char *path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
						  &base, 0);

		pr_warn("ceph: async create failure path=(%llx)%s result=%d!\n",
			base, IS_ERR(path) ? "<<bad>>" : path, result);
		ceph_mdsc_free_path(path, pathlen);

		ceph_dir_clear_complete(req->r_parent);
		if (!d_unhashed(dentry))
			d_drop(dentry);

		if (dinode) {
			mapping_set_error(dinode->i_mapping, result);
			ceph_inode_shutdown(dinode);
			wake_async_create_waiters(dinode, req->r_session);
		}
	}

	if (tinode) {
		u64 ino = ceph_vino(tinode).ino;

		if (req->r_deleg_ino != ino)
			pr_warn("%s: inode number mismatch! err=%d deleg_ino=0x%llx target=0x%llx\n",
				__func__, req->r_err, req->r_deleg_ino, ino);

		mapping_set_error(tinode->i_mapping, result);
		wake_async_create_waiters(tinode, req->r_session);
	} else if (!result) {
		pr_warn("%s: no req->r_target_inode for 0x%llx\n", __func__,
			req->r_deleg_ino);
	}
out:
	ceph_mdsc_release_dir_caps(req);
}
static int ceph_finish_async_create(struct inode *dir, struct dentry *dentry,
				    struct file *file, umode_t mode,
				    struct ceph_mds_request *req,
				    struct ceph_acl_sec_ctx *as_ctx,
				    struct ceph_file_layout *lo)
{
	int ret;
	char xattr_buf[4];
	struct ceph_mds_reply_inode in = { };
	struct ceph_mds_reply_info_in iinfo = { .in = &in };
	struct ceph_inode_info *ci = ceph_inode(dir);
	struct inode *inode;
	struct timespec64 now;
	struct ceph_string *pool_ns;
	struct ceph_mds_client *mdsc = ceph_sb_to_mdsc(dir->i_sb);
	struct ceph_vino vino = { .ino = req->r_deleg_ino,
				  .snap = CEPH_NOSNAP };

	ktime_get_real_ts64(&now);

	inode = ceph_get_inode(dentry->d_sb, vino);
	if (IS_ERR(inode))
		return PTR_ERR(inode);

	iinfo.inline_version = CEPH_INLINE_NONE;
	iinfo.change_attr = 1;
	ceph_encode_timespec64(&iinfo.btime, &now);

	if (req->r_pagelist) {
		iinfo.xattr_len = req->r_pagelist->length;
		iinfo.xattr_data = req->r_pagelist->mapped_tail;
	} else {
		/* fake it */
		iinfo.xattr_len = ARRAY_SIZE(xattr_buf);
		iinfo.xattr_data = xattr_buf;
		memset(iinfo.xattr_data, 0, iinfo.xattr_len);
	}

	in.ino = cpu_to_le64(vino.ino);
	in.snapid = cpu_to_le64(CEPH_NOSNAP);
	in.version = cpu_to_le64(1);	// ???
	in.cap.caps = in.cap.wanted = cpu_to_le32(CEPH_CAP_ALL_FILE);
	in.cap.cap_id = cpu_to_le64(1);
	in.cap.realm = cpu_to_le64(ci->i_snap_realm->ino);
	in.cap.flags = CEPH_CAP_FLAG_AUTH;
	in.ctime = in.mtime = in.atime = iinfo.btime;
	in.truncate_seq = cpu_to_le32(1);
	in.truncate_size = cpu_to_le64(-1ULL);
	in.xattr_version = cpu_to_le64(1);
	in.uid = cpu_to_le32(from_kuid(&init_user_ns, current_fsuid()));
	if (dir->i_mode & S_ISGID) {
		in.gid = cpu_to_le32(from_kgid(&init_user_ns, dir->i_gid));

		/* Directories always inherit the setgid bit. */
		if (S_ISDIR(mode))
			mode |= S_ISGID;
		else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
			 !in_group_p(dir->i_gid) &&
			 !capable_wrt_inode_uidgid(&init_user_ns, dir, CAP_FSETID))
			mode &= ~S_ISGID;
	} else {
		in.gid = cpu_to_le32(from_kgid(&init_user_ns, current_fsgid()));
	}
	in.mode = cpu_to_le32((u32)mode);

	in.nlink = cpu_to_le32(1);
	in.max_size = cpu_to_le64(lo->stripe_unit);

	ceph_file_layout_to_legacy(lo, &in.layout);
	/* lo is private, so pool_ns can't change */
	pool_ns = rcu_dereference_raw(lo->pool_ns);
	if (pool_ns) {
		iinfo.pool_ns_len = pool_ns->len;
		iinfo.pool_ns_data = pool_ns->str;
	}

	down_read(&mdsc->snap_rwsem);
	ret = ceph_fill_inode(inode, NULL, &iinfo, NULL, req->r_session,
			      req->r_fmode, NULL);
	up_read(&mdsc->snap_rwsem);
	if (ret) {
		dout("%s failed to fill inode: %d\n", __func__, ret);
		ceph_dir_clear_complete(dir);
		if (!d_unhashed(dentry))
			d_drop(dentry);
		if (inode->i_state & I_NEW)
			discard_new_inode(inode);
	} else {
		struct dentry *dn;

		dout("%s d_adding new inode 0x%llx to 0x%llx/%s\n", __func__,
			vino.ino, ceph_ino(dir), dentry->d_name.name);
		ceph_dir_clear_ordered(dir);
		ceph_init_inode_acls(inode, as_ctx);
		if (inode->i_state & I_NEW) {
			/*
			 * If it's not I_NEW, then someone created this before
			 * we got here. Assume the server is aware of it at
			 * that point and don't worry about setting
			 * CEPH_I_ASYNC_CREATE.
			 */
			ceph_inode(inode)->i_ceph_flags = CEPH_I_ASYNC_CREATE;
			unlock_new_inode(inode);
		}
		if (d_in_lookup(dentry) || d_really_is_negative(dentry)) {
			if (!d_unhashed(dentry))
				d_drop(dentry);
			dn = d_splice_alias(inode, dentry);
			WARN_ON_ONCE(dn && dn != dentry);
		}
		file->f_mode |= FMODE_CREATED;
		ret = finish_open(file, dentry, ceph_open);
	}
	return ret;
}
/*
 * Do a lookup + open with a single request.  If we get a non-existent
 * file or symlink, return 1 so the VFS can retry.
 */
int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
		     struct file *file, unsigned flags, umode_t mode)
{
	struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct ceph_mds_request *req;
	struct dentry *dn;
	struct ceph_acl_sec_ctx as_ctx = {};
	bool try_async = ceph_test_mount_opt(fsc, ASYNC_DIROPS);
	int mask;
	int err;

	dout("atomic_open %p dentry %p '%pd' %s flags %d mode 0%o\n",
	     dir, dentry, dentry,
	     d_unhashed(dentry) ? "unhashed" : "hashed", flags, mode);

	if (dentry->d_name.len > NAME_MAX)
		return -ENAMETOOLONG;

	if (flags & O_CREAT) {
		if (ceph_quota_is_max_files_exceeded(dir))
			return -EDQUOT;
		err = ceph_pre_init_acls(dir, &mode, &as_ctx);
		if (err < 0)
			return err;
		err = ceph_security_init_secctx(dentry, mode, &as_ctx);
		if (err < 0)
			goto out_ctx;
		/* Async create can't handle more than a page of xattrs */
		if (as_ctx.pagelist &&
		    !list_is_singular(&as_ctx.pagelist->head))
			try_async = false;
	} else if (!d_in_lookup(dentry)) {
		/* If it's not being looked up, it's negative */
		return -ENOENT;
	}
retry:
	/* do the open */
	req = prepare_open_request(dir->i_sb, flags, mode);
	if (IS_ERR(req)) {
		err = PTR_ERR(req);
		goto out_ctx;
	}
	req->r_dentry = dget(dentry);
	req->r_num_caps = 2;
	mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
	if (ceph_security_xattr_wanted(dir))
		mask |= CEPH_CAP_XATTR_SHARED;
	req->r_args.open.mask = cpu_to_le32(mask);
	req->r_parent = dir;
	ihold(dir);

	if (flags & O_CREAT) {
		struct ceph_file_layout lo;

		req->r_dentry_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_AUTH_EXCL;
		req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
		if (as_ctx.pagelist) {
			req->r_pagelist = as_ctx.pagelist;
			as_ctx.pagelist = NULL;
		}
		if (try_async &&
		    (req->r_dir_caps =
		      try_prep_async_create(dir, dentry, &lo,
					    &req->r_deleg_ino))) {
			set_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags);
			req->r_args.open.flags |= cpu_to_le32(CEPH_O_EXCL);
			req->r_callback = ceph_async_create_cb;

			err = ceph_mdsc_submit_request(mdsc, dir, req);
			if (!err) {
				err = ceph_finish_async_create(dir, dentry,
							file, mode, req,
							&as_ctx, &lo);
			} else if (err == -EJUKEBOX) {
				restore_deleg_ino(dir, req->r_deleg_ino);
				ceph_mdsc_put_request(req);
				try_async = false;
				ceph_put_string(rcu_dereference_raw(lo.pool_ns));
				goto retry;
			}
			ceph_put_string(rcu_dereference_raw(lo.pool_ns));
			goto out_req;
		}
	}

	set_bit(CEPH_MDS_R_PARENT_LOCKED, &req->r_req_flags);
	err = ceph_mdsc_do_request(mdsc,
				   (flags & (O_CREAT|O_TRUNC)) ? dir : NULL,
				   req);
	if (err == -ENOENT) {
		dentry = ceph_handle_snapdir(req, dentry);
		if (IS_ERR(dentry)) {
			err = PTR_ERR(dentry);
			goto out_req;
		}
		err = 0;
	}

	if (!err && (flags & O_CREAT) && !req->r_reply_info.head->is_dentry)
		err = ceph_handle_notrace_create(dir, dentry);

	if (d_in_lookup(dentry)) {
		dn = ceph_finish_lookup(req, dentry, err);
		if (IS_ERR(dn))
			err = PTR_ERR(dn);
	} else {
		/* we were given a hashed negative dentry */
		dn = NULL;
	}
	if (err)
		goto out_req;
	if (dn || d_really_is_negative(dentry) || d_is_symlink(dentry)) {
		/* make vfs retry on splice, ENOENT, or symlink */
		dout("atomic_open finish_no_open on dn %p\n", dn);
		err = finish_no_open(file, dn);
	} else {
		dout("atomic_open finish_open on dn %p\n", dn);
		if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
			struct inode *newino = d_inode(dentry);

			cache_file_layout(dir, newino);
			ceph_init_inode_acls(newino, &as_ctx);
			file->f_mode |= FMODE_CREATED;
		}
		err = finish_open(file, dentry, ceph_open);
	}
out_req:
	ceph_mdsc_put_request(req);
out_ctx:
	ceph_release_acl_sec_ctx(&as_ctx);
	dout("atomic_open result=%d\n", err);
	return err;
}
int ceph_release(struct inode *inode, struct file *file)
{
	struct ceph_inode_info *ci = ceph_inode(inode);

	if (S_ISDIR(inode->i_mode)) {
		struct ceph_dir_file_info *dfi = file->private_data;
		dout("release inode %p dir file %p\n", inode, file);
		WARN_ON(!list_empty(&dfi->file_info.rw_contexts));

		ceph_put_fmode(ci, dfi->file_info.fmode, 1);

		if (dfi->last_readdir)
			ceph_mdsc_put_request(dfi->last_readdir);
		kfree(dfi->last_name);
		kfree(dfi->dir_info);
		kmem_cache_free(ceph_dir_file_cachep, dfi);
	} else {
		struct ceph_file_info *fi = file->private_data;
		dout("release inode %p regular file %p\n", inode, file);
		WARN_ON(!list_empty(&fi->rw_contexts));

		ceph_fscache_unuse_cookie(inode, file->f_mode & FMODE_WRITE);
		ceph_put_fmode(ci, fi->fmode, 1);

		kmem_cache_free(ceph_file_cachep, fi);
	}

	/* wake up anyone waiting for caps on this inode */
	wake_up_all(&ci->i_cap_wq);
	return 0;
}

enum {
	HAVE_RETRIED = 1,
	CHECK_EOF =    2,
	READ_INLINE =  3,
};
/*
 * Completely synchronous read and write methods.  Direct from __user
 * buffer to osd, or directly to user pages (if O_DIRECT).
 *
 * If the read spans object boundary, just do multiple reads.  (That's not
 * atomic, but good enough for now.)
 *
 * If we get a short result from the OSD, check against i_size; we need to
 * only return a short read to the caller if we hit EOF.
 */
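/*
 * Worked example of the short-read rule: with i_size = 100 and a
 * 60-byte read at offset 20, an OSD may return only 30 bytes.  Since
 * off + ret = 50 < i_size, the remaining min(len - ret, i_size - off - ret)
 * = 30 bytes are zero-filled rather than returned short; only a true
 * EOF may shorten the result handed back to the caller.
 */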
static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
			      int *retry_op)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	ssize_t ret;
	u64 off = iocb->ki_pos;
	u64 len = iov_iter_count(to);
	u64 i_size = i_size_read(inode);

	dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
	     (file->f_flags & O_DIRECT) ? "O_DIRECT" : "");

	if (!len)
		return 0;
	/*
	 * flush any page cache pages in this range.  this
	 * will make concurrent normal and sync io slow,
	 * but it will at least behave sensibly when they are
	 * mixed.
	 */
	ret = filemap_write_and_wait_range(inode->i_mapping,
					   off, off + len - 1);
	if (ret < 0)
		return ret;

	while ((len = iov_iter_count(to)) > 0) {
		struct ceph_osd_request *req;
		struct page **pages;
		int num_pages;
		size_t page_off;
		bool more;
		int idx;
		size_t left;

		req = ceph_osdc_new_request(osdc, &ci->i_layout,
					ci->i_vino, off, &len, 0, 1,
					CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
					NULL, ci->i_truncate_seq,
					ci->i_truncate_size, false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		more = len < iov_iter_count(to);

		num_pages = calc_pages_for(off, len);
		page_off = off & ~PAGE_MASK;
		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages)) {
			ceph_osdc_put_request(req);
			ret = PTR_ERR(pages);
			break;
		}

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_off,
						 false, false);
		ret = ceph_osdc_start_request(osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(osdc, req);

		ceph_update_read_metrics(&fsc->mdsc->metric,
					 req->r_start_latency,
					 req->r_end_latency,
					 len, ret);

		ceph_osdc_put_request(req);

		i_size = i_size_read(inode);
		dout("sync_read %llu~%llu got %zd i_size %llu%s\n",
		     off, len, ret, i_size, (more ? " MORE" : ""));

		if (ret == -ENOENT)
			ret = 0;
		if (ret >= 0 && ret < len && (off + ret < i_size)) {
			int zlen = min(len - ret, i_size - off - ret);
			int zoff = page_off + ret;
			dout("sync_read zero gap %llu~%llu\n",
			     off + ret, off + ret + zlen);
			ceph_zero_page_vector_range(zoff, zlen, pages);
			ret += zlen;
		}

		idx = 0;
		left = ret > 0 ? ret : 0;
		while (left > 0) {
			size_t len, copied;
			page_off = off & ~PAGE_MASK;
			len = min_t(size_t, left, PAGE_SIZE - page_off);
			SetPageUptodate(pages[idx]);
			copied = copy_page_to_iter(pages[idx++],
						   page_off, len, to);
			off += copied;
			left -= copied;
			if (copied < len) {
				ret = -EFAULT;
				break;
			}
		}
		ceph_release_page_vector(pages, num_pages);

		if (ret < 0) {
			if (ret == -EBLOCKLISTED)
				fsc->blocklisted = true;
			break;
		}

		if (off >= i_size || !more)
			break;
	}

	if (off > iocb->ki_pos) {
		if (off >= i_size) {
			*retry_op = CHECK_EOF;
			ret = i_size - iocb->ki_pos;
			iocb->ki_pos = i_size;
		} else {
			ret = off - iocb->ki_pos;
			iocb->ki_pos = off;
		}
	}

	dout("sync_read result %zd retry_op %d\n", ret, *retry_op);
	return ret;
}
struct ceph_aio_request {
	struct kiocb *iocb;
	size_t total_len;
	bool write;
	bool should_dirty;
	int error;
	struct list_head osd_reqs;
	unsigned num_reqs;
	atomic_t pending_reqs;
	struct timespec64 mtime;
	struct ceph_cap_flush *prealloc_cf;
};

struct ceph_aio_work {
	struct work_struct work;
	struct ceph_osd_request *req;
};
static void ceph_aio_retry_work(struct work_struct *work);

static void ceph_aio_complete(struct inode *inode,
			      struct ceph_aio_request *aio_req)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	if (!atomic_dec_and_test(&aio_req->pending_reqs))
		return;

	if (aio_req->iocb->ki_flags & IOCB_DIRECT)
		inode_dio_end(inode);

	ret = aio_req->error;
	if (!ret)
		ret = aio_req->total_len;

	dout("ceph_aio_complete %p rc %d\n", inode, ret);

	if (ret >= 0 && aio_req->write) {
		int dirty;

		loff_t endoff = aio_req->iocb->ki_pos + aio_req->total_len;
		if (endoff > i_size_read(inode)) {
			if (ceph_inode_set_size(inode, endoff))
				ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
		}

		spin_lock(&ci->i_ceph_lock);
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &aio_req->prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}

	ceph_put_cap_refs(ci, (aio_req->write ? CEPH_CAP_FILE_WR :
						CEPH_CAP_FILE_RD));

	aio_req->iocb->ki_complete(aio_req->iocb, ret);

	ceph_free_cap_flush(aio_req->prealloc_cf);
	kfree(aio_req);
}
static void ceph_aio_complete_req(struct ceph_osd_request *req)
{
	int rc = req->r_result;
	struct inode *inode = req->r_inode;
	struct ceph_aio_request *aio_req = req->r_priv;
	struct ceph_osd_data *osd_data = osd_req_op_extent_osd_data(req, 0);
	struct ceph_client_metric *metric = &ceph_sb_to_mdsc(inode->i_sb)->metric;
	unsigned int len = osd_data->bvec_pos.iter.bi_size;

	BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_BVECS);
	BUG_ON(!osd_data->num_bvecs);

	dout("ceph_aio_complete_req %p rc %d bytes %u\n", inode, rc, len);

	if (rc == -EOLDSNAPC) {
		struct ceph_aio_work *aio_work;
		BUG_ON(!aio_req->write);

		aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
		if (aio_work) {
			INIT_WORK(&aio_work->work, ceph_aio_retry_work);
			aio_work->req = req;
			queue_work(ceph_inode_to_client(inode)->inode_wq,
				   &aio_work->work);
			return;
		}
		rc = -ENOMEM;
	} else if (!aio_req->write) {
		if (rc == -ENOENT)
			rc = 0;
		if (rc >= 0 && len > rc) {
			struct iov_iter i;
			int zlen = len - rc;

			/*
			 * If read is satisfied by single OSD request,
			 * it can pass EOF. Otherwise read is within
			 * i_size.
			 */
			if (aio_req->num_reqs == 1) {
				loff_t i_size = i_size_read(inode);
				loff_t endoff = aio_req->iocb->ki_pos + rc;
				if (endoff < i_size)
					zlen = min_t(size_t, zlen,
						     i_size - endoff);
				aio_req->total_len = rc + zlen;
			}

			iov_iter_bvec(&i, READ, osd_data->bvec_pos.bvecs,
				      osd_data->num_bvecs, len);
			iov_iter_advance(&i, rc);
			iov_iter_zero(zlen, &i);
		}
	}

	/* r_start_latency == 0 means the request was not submitted */
	if (req->r_start_latency) {
		if (aio_req->write)
			ceph_update_write_metrics(metric, req->r_start_latency,
						  req->r_end_latency, len, rc);
		else
			ceph_update_read_metrics(metric, req->r_start_latency,
						 req->r_end_latency, len, rc);
	}

	put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs,
		  aio_req->should_dirty);
	ceph_osdc_put_request(req);

	if (rc < 0)
		cmpxchg(&aio_req->error, 0, rc);

	ceph_aio_complete(inode, aio_req);
	return;
}
static void ceph_aio_retry_work(struct work_struct *work)
{
	struct ceph_aio_work *aio_work =
		container_of(work, struct ceph_aio_work, work);
	struct ceph_osd_request *orig_req = aio_work->req;
	struct ceph_aio_request *aio_req = orig_req->r_priv;
	struct inode *inode = orig_req->r_inode;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_snap_context *snapc;
	struct ceph_osd_request *req;
	int ret;

	spin_lock(&ci->i_ceph_lock);
	if (__ceph_have_pending_cap_snap(ci)) {
		struct ceph_cap_snap *capsnap =
			list_last_entry(&ci->i_cap_snaps,
					struct ceph_cap_snap,
					ci_item);
		snapc = ceph_get_snap_context(capsnap->context);
	} else {
		BUG_ON(!ci->i_head_snapc);
		snapc = ceph_get_snap_context(ci->i_head_snapc);
	}
	spin_unlock(&ci->i_ceph_lock);

	req = ceph_osdc_alloc_request(orig_req->r_osdc, snapc, 1,
			false, GFP_NOFS);
	if (!req) {
		ret = -ENOMEM;
		req = orig_req;
		goto out;
	}

	req->r_flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
	ceph_oloc_copy(&req->r_base_oloc, &orig_req->r_base_oloc);
	ceph_oid_copy(&req->r_base_oid, &orig_req->r_base_oid);

	req->r_ops[0] = orig_req->r_ops[0];

	req->r_mtime = aio_req->mtime;
	req->r_data_offset = req->r_ops[0].extent.offset;

	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
	if (ret) {
		ceph_osdc_put_request(req);
		req = orig_req;
		goto out;
	}

	ceph_osdc_put_request(orig_req);

	req->r_callback = ceph_aio_complete_req;
	req->r_inode = inode;
	req->r_priv = aio_req;

	ret = ceph_osdc_start_request(req->r_osdc, req, false);
out:
	if (ret < 0) {
		req->r_result = ret;
		ceph_aio_complete_req(req);
	}

	ceph_put_snap_context(snapc);
	kfree(aio_work);
}
static ssize_t
ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
		       struct ceph_snap_context *snapc,
		       struct ceph_cap_flush **pcf)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_client_metric *metric = &fsc->mdsc->metric;
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct bio_vec *bvecs;
	struct ceph_aio_request *aio_req = NULL;
	int num_pages = 0;
	int flags;
	int ret = 0;
	struct timespec64 mtime = current_time(inode);
	size_t count = iov_iter_count(iter);
	loff_t pos = iocb->ki_pos;
	bool write = iov_iter_rw(iter) == WRITE;
	bool should_dirty = !write && iter_is_iovec(iter);

	if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_direct_%s on file %p %lld~%u snapc %p seq %lld\n",
	     (write ? "write" : "read"), file, pos, (unsigned)count,
	     snapc, snapc ? snapc->seq : 0);

	if (write) {
		int ret2;

		ceph_fscache_invalidate(inode, true);

		ret2 = invalidate_inode_pages2_range(inode->i_mapping,
					pos >> PAGE_SHIFT,
					(pos + count - 1) >> PAGE_SHIFT);
		if (ret2 < 0)
			dout("invalidate_inode_pages2_range returned %d\n", ret2);

		flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;
	} else {
		flags = CEPH_OSD_FLAG_READ;
	}

	while (iov_iter_count(iter) > 0) {
		u64 size = iov_iter_count(iter);
		ssize_t len;

		if (write)
			size = min_t(u64, size, fsc->mount_options->wsize);
		else
			size = min_t(u64, size, fsc->mount_options->rsize);

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &size, 0,
					    1,
					    write ? CEPH_OSD_OP_WRITE :
						    CEPH_OSD_OP_READ,
					    flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		len = iter_get_bvecs_alloc(iter, size, &bvecs, &num_pages);
		if (len < 0) {
			ceph_osdc_put_request(req);
			ret = len;
			break;
		}
		if (len != size)
			osd_req_op_extent_update(req, 0, len);

		/*
		 * To simplify error handling, allow AIO when IO within i_size
		 * or IO can be satisfied by single OSD request.
		 */
		if (pos == iocb->ki_pos && !is_sync_kiocb(iocb) &&
		    (len == count || pos + count <= i_size_read(inode))) {
			aio_req = kzalloc(sizeof(*aio_req), GFP_KERNEL);
			if (aio_req) {
				aio_req->iocb = iocb;
				aio_req->write = write;
				aio_req->should_dirty = should_dirty;
				INIT_LIST_HEAD(&aio_req->osd_reqs);
				if (write) {
					aio_req->mtime = mtime;
					swap(aio_req->prealloc_cf, *pcf);
				}
			}
			/* ignore error */
		}

		if (write) {
			/*
			 * throw out any page cache pages in this range.  this
			 * may block.
			 */
			truncate_inode_pages_range(inode->i_mapping, pos,
						   PAGE_ALIGN(pos + len) - 1);

			req->r_mtime = mtime;
		}

		osd_req_op_extent_osd_data_bvecs(req, 0, bvecs, num_pages, len);

		if (aio_req) {
			aio_req->total_len += len;
			aio_req->num_reqs++;
			atomic_inc(&aio_req->pending_reqs);

			req->r_callback = ceph_aio_complete_req;
			req->r_inode = inode;
			req->r_priv = aio_req;
			list_add_tail(&req->r_private_item, &aio_req->osd_reqs);

			pos += len;
			continue;
		}

		ret = ceph_osdc_start_request(req->r_osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

		if (write)
			ceph_update_write_metrics(metric, req->r_start_latency,
						  req->r_end_latency, len, ret);
		else
			ceph_update_read_metrics(metric, req->r_start_latency,
						 req->r_end_latency, len, ret);

		size = i_size_read(inode);
		if (!write) {
			if (ret == -ENOENT)
				ret = 0;
			if (ret >= 0 && ret < len && pos + ret < size) {
				struct iov_iter i;
				int zlen = min_t(size_t, len - ret,
						 size - pos - ret);

				iov_iter_bvec(&i, READ, bvecs, num_pages, len);
				iov_iter_advance(&i, ret);
				iov_iter_zero(zlen, &i);
				ret += zlen;
			}
			if (ret >= 0)
				len = ret;
		}

		put_bvecs(bvecs, num_pages, should_dirty);
		ceph_osdc_put_request(req);
		if (ret < 0)
			break;

		pos += len;
		if (!write && pos >= size)
			break;

		if (write && pos > size) {
			if (ceph_inode_set_size(inode, pos))
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}
	}

	if (aio_req) {
		LIST_HEAD(osd_reqs);

		if (aio_req->num_reqs == 0) {
			kfree(aio_req);
			return ret;
		}

		ceph_get_cap_refs(ci, write ? CEPH_CAP_FILE_WR :
					      CEPH_CAP_FILE_RD);

		list_splice(&aio_req->osd_reqs, &osd_reqs);
		inode_dio_begin(inode);
		while (!list_empty(&osd_reqs)) {
			req = list_first_entry(&osd_reqs,
					       struct ceph_osd_request,
					       r_private_item);
			list_del_init(&req->r_private_item);
			if (ret >= 0)
				ret = ceph_osdc_start_request(req->r_osdc,
							      req, false);
			if (ret < 0) {
				req->r_result = ret;
				ceph_aio_complete_req(req);
			}
		}
		return -EIOCBQUEUED;
	}

	if (ret != -EOLDSNAPC && pos > iocb->ki_pos) {
		ret = pos - iocb->ki_pos;
		iocb->ki_pos = pos;
	}
	return ret;
}
/*
 * Synchronous write, straight from __user pointer or user pages.
 *
 * If write spans object boundary, just do multiple writes.  (For a
 * correct atomic write, we should e.g. take write locks on all
 * objects, rollback on failure, etc.)
 */
static ssize_t
ceph_sync_write(struct kiocb *iocb, struct iov_iter *from, loff_t pos,
		struct ceph_snap_context *snapc)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_vino vino;
	struct ceph_osd_request *req;
	struct page **pages;
	u64 len;
	int num_pages;
	int written = 0;
	int flags;
	int ret;
	bool check_caps = false;
	struct timespec64 mtime = current_time(inode);
	size_t count = iov_iter_count(from);

	if (ceph_snap(file_inode(file)) != CEPH_NOSNAP)
		return -EROFS;

	dout("sync_write on file %p %lld~%u snapc %p seq %lld\n",
	     file, pos, (unsigned)count, snapc, snapc->seq);

	ret = filemap_write_and_wait_range(inode->i_mapping,
					   pos, pos + count - 1);
	if (ret < 0)
		return ret;

	ceph_fscache_invalidate(inode, false);
	ret = invalidate_inode_pages2_range(inode->i_mapping,
					    pos >> PAGE_SHIFT,
					    (pos + count - 1) >> PAGE_SHIFT);
	if (ret < 0)
		dout("invalidate_inode_pages2_range returned %d\n", ret);

	flags = /* CEPH_OSD_FLAG_ORDERSNAP | */ CEPH_OSD_FLAG_WRITE;

	while ((len = iov_iter_count(from)) > 0) {
		size_t left;
		int n;

		vino = ceph_vino(inode);
		req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					    vino, pos, &len, 0, 1,
					    CEPH_OSD_OP_WRITE, flags, snapc,
					    ci->i_truncate_seq,
					    ci->i_truncate_size,
					    false);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
			break;
		}

		/*
		 * write from beginning of first page,
		 * regardless of io alignment
		 */
		num_pages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;

		pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
		if (IS_ERR(pages)) {
			ret = PTR_ERR(pages);
			goto out;
		}

		left = len;
		for (n = 0; n < num_pages; n++) {
			size_t plen = min_t(size_t, left, PAGE_SIZE);
			ret = copy_page_from_iter(pages[n], 0, plen, from);
			if (ret != plen) {
				ret = -EFAULT;
				break;
			}
			left -= ret;
		}

		if (ret < 0) {
			ceph_release_page_vector(pages, num_pages);
			goto out;
		}

		req->r_inode = inode;

		osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
						false, true);

		req->r_mtime = mtime;
		ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
		if (!ret)
			ret = ceph_osdc_wait_request(&fsc->client->osdc, req);

		ceph_update_write_metrics(&fsc->mdsc->metric, req->r_start_latency,
					  req->r_end_latency, len, ret);
out:
		ceph_osdc_put_request(req);
		if (ret != 0) {
			ceph_set_error_write(ci);
			break;
		}

		ceph_clear_error_write(ci);
		pos += len;
		written += len;
		if (pos > i_size_read(inode)) {
			check_caps = ceph_inode_set_size(inode, pos);
			if (check_caps)
				ceph_check_caps(ceph_inode(inode),
						CHECK_CAPS_AUTHONLY,
						NULL);
		}
	}

	if (ret != -EOLDSNAPC && written > 0) {
		ret = written;
		iocb->ki_pos = pos;
	}
	return ret;
}
/*
 * Wrap generic_file_aio_read with checks for cap bits on the inode.
 * Atomically grab references, so that those bits are not released
 * back to the MDS mid-read.
 *
 * Hmm, the sync read case isn't actually async... should it be?
 */
static ssize_t ceph_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *filp = iocb->ki_filp;
	struct ceph_file_info *fi = filp->private_data;
	size_t len = iov_iter_count(to);
	struct inode *inode = file_inode(filp);
	struct ceph_inode_info *ci = ceph_inode(inode);
	bool direct_lock = iocb->ki_flags & IOCB_DIRECT;
	ssize_t ret;
	int want = 0, got = 0;
	int retry_op = 0, read = 0;

again:
	dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
	     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);

	if (ceph_inode_is_shutdown(inode))
		return -ESTALE;

	if (direct_lock)
		ceph_start_io_direct(inode);
	else
		ceph_start_io_read(inode);

	if (!(fi->flags & CEPH_F_SYNC) && !direct_lock)
		want |= CEPH_CAP_FILE_CACHE;
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want |= CEPH_CAP_FILE_LAZYIO;

	ret = ceph_get_caps(filp, CEPH_CAP_FILE_RD, want, -1, &got);
	if (ret < 0) {
		if (direct_lock)
			ceph_end_io_direct(inode);
		else
			ceph_end_io_read(inode);
		return ret;
	}

	if ((got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) ||
	    (fi->flags & CEPH_F_SYNC)) {

		dout("aio_sync_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));

		if (ci->i_inline_version == CEPH_INLINE_NONE) {
			if (!retry_op && (iocb->ki_flags & IOCB_DIRECT)) {
				ret = ceph_direct_read_write(iocb, to,
							     NULL, NULL);
				if (ret >= 0 && ret < len)
					retry_op = CHECK_EOF;
			} else {
				ret = ceph_sync_read(iocb, to, &retry_op);
			}
		} else {
			retry_op = READ_INLINE;
		}
	} else {
		CEPH_DEFINE_RW_CONTEXT(rw_ctx, got);
		dout("aio_read %p %llx.%llx %llu~%u got cap refs on %s\n",
		     inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len,
		     ceph_cap_string(got));
		ceph_add_rw_context(fi, &rw_ctx);
		ret = generic_file_read_iter(iocb, to);
		ceph_del_rw_context(fi, &rw_ctx);
	}

	dout("aio_read %p %llx.%llx dropping cap refs on %s = %d\n",
	     inode, ceph_vinop(inode), ceph_cap_string(got), (int)ret);
	ceph_put_cap_refs(ci, got);

	if (direct_lock)
		ceph_end_io_direct(inode);
	else
		ceph_end_io_read(inode);

	if (retry_op > HAVE_RETRIED && ret >= 0) {
		int statret;
		struct page *page = NULL;
		loff_t i_size;
		if (retry_op == READ_INLINE) {
			page = __page_cache_alloc(GFP_KERNEL);
			if (!page)
				return -ENOMEM;
		}

		statret = __ceph_do_getattr(inode, page,
					    CEPH_STAT_CAP_INLINE_DATA, !!page);
		if (statret < 0) {
			if (page)
				__free_page(page);
			if (statret == -ENODATA) {
				BUG_ON(retry_op != READ_INLINE);
				goto again;
			}
			return statret;
		}

		i_size = i_size_read(inode);
		if (retry_op == READ_INLINE) {
			BUG_ON(ret > 0 || read > 0);
			if (iocb->ki_pos < i_size &&
			    iocb->ki_pos < PAGE_SIZE) {
				loff_t end = min_t(loff_t, i_size,
						   iocb->ki_pos + len);
				end = min_t(loff_t, end, PAGE_SIZE);
				if (statret < end)
					zero_user_segment(page, statret, end);
				ret = copy_page_to_iter(page,
						iocb->ki_pos & ~PAGE_MASK,
						end - iocb->ki_pos, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			if (iocb->ki_pos < i_size && read < len) {
				size_t zlen = min_t(size_t, len - read,
						    i_size - iocb->ki_pos);
				ret = iov_iter_zero(zlen, to);
				iocb->ki_pos += ret;
				read += ret;
			}
			__free_pages(page, 0);
			return read;
		}

		/* hit EOF or hole? */
		if (retry_op == CHECK_EOF && iocb->ki_pos < i_size &&
		    ret < len) {
			dout("sync_read hit hole, ppos %lld < size %lld"
			     ", reading more\n", iocb->ki_pos, i_size);

			read += ret;
			len -= ret;
			retry_op = HAVE_RETRIED;
			goto again;
		}
	}

	if (ret >= 0)
		ret += read;

	return ret;
}
/*
 * Take cap references to avoid releasing caps to MDS mid-write.
 *
 * If we are synchronous, and write with an old snap context, the OSD
 * may return EOLDSNAPC.  In that case, retry the write.. _after_
 * dropping our cap refs and allowing the pending snap to logically
 * complete _before_ this write occurs.
 *
 * If we are near ENOSPC, write synchronously.
 */
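/*
 * Shape of the EOLDSNAPC retry in ceph_write_iter() below (sketch):
 *
 * retry_snap:
 *	... get caps, pick a snap context, do the sync/direct write ...
 *	ceph_put_cap_refs(ci, got);
 *	if (written == -EOLDSNAPC)
 *		goto retry_snap;
 */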
static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_client *osdc = &fsc->client->osdc;
	struct ceph_cap_flush *prealloc_cf;
	ssize_t count, written = 0;
	int err, want = 0, got;
	bool direct_lock = false;
	u32 map_flags;
	u64 pool_flags;
	loff_t pos;
	loff_t limit = max(i_size_read(inode), fsc->max_file_size);

	if (ceph_inode_is_shutdown(inode))
		return -ESTALE;

	if (ceph_snap(inode) != CEPH_NOSNAP)
		return -EROFS;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	if ((iocb->ki_flags & (IOCB_DIRECT | IOCB_APPEND)) == IOCB_DIRECT)
		direct_lock = true;

retry_snap:
	if (direct_lock)
		ceph_start_io_direct(inode);
	else
		ceph_start_io_write(inode);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = inode_to_bdi(inode);

	if (iocb->ki_flags & IOCB_APPEND) {
		err = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (err < 0)
			goto out;
	}

	err = generic_write_checks(iocb, from);
	if (err <= 0)
		goto out;

	pos = iocb->ki_pos;
	if (unlikely(pos >= limit)) {
		err = -EFBIG;
		goto out;
	} else {
		iov_iter_truncate(from, limit - pos);
	}

	count = iov_iter_count(from);
	if (ceph_quota_is_max_bytes_exceeded(inode, pos + count)) {
		err = -EDQUOT;
		goto out;
	}

	down_read(&osdc->lock);
	map_flags = osdc->osdmap->flags;
	pool_flags = ceph_pg_pool_flags(osdc->osdmap, ci->i_layout.pool_id);
	up_read(&osdc->lock);
	if ((map_flags & CEPH_OSDMAP_FULL) ||
	    (pool_flags & CEPH_POOL_FLAG_FULL)) {
		err = -ENOSPC;
		goto out;
	}

	err = file_remove_privs(file);
	if (err)
		goto out;

	dout("aio_write %p %llx.%llx %llu~%zd getting caps. i_size %llu\n",
	     inode, ceph_vinop(inode), pos, count, i_size_read(inode));
	if (!(fi->flags & CEPH_F_SYNC) && !direct_lock)
		want |= CEPH_CAP_FILE_BUFFER;
	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want |= CEPH_CAP_FILE_LAZYIO;

	err = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, pos + count, &got);
	if (err < 0)
		goto out;

	err = file_update_time(file);
	if (err)
		goto out_caps;

	inode_inc_iversion_raw(inode);

	dout("aio_write %p %llx.%llx %llu~%zd got cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, count, ceph_cap_string(got));

	if ((got & (CEPH_CAP_FILE_BUFFER|CEPH_CAP_FILE_LAZYIO)) == 0 ||
	    (iocb->ki_flags & IOCB_DIRECT) || (fi->flags & CEPH_F_SYNC) ||
	    (ci->i_ceph_flags & CEPH_I_ERROR_WRITE)) {
		struct ceph_snap_context *snapc;
		struct iov_iter data;

		spin_lock(&ci->i_ceph_lock);
		if (__ceph_have_pending_cap_snap(ci)) {
			struct ceph_cap_snap *capsnap =
					list_last_entry(&ci->i_cap_snaps,
							struct ceph_cap_snap,
							ci_item);
			snapc = ceph_get_snap_context(capsnap->context);
		} else {
			BUG_ON(!ci->i_head_snapc);
			snapc = ceph_get_snap_context(ci->i_head_snapc);
		}
		spin_unlock(&ci->i_ceph_lock);

		/* we might need to revert back to that point */
		data = *from;
		if (iocb->ki_flags & IOCB_DIRECT)
			written = ceph_direct_read_write(iocb, &data, snapc,
							 &prealloc_cf);
		else
			written = ceph_sync_write(iocb, &data, pos, snapc);
		if (direct_lock)
			ceph_end_io_direct(inode);
		else
			ceph_end_io_write(inode);
		if (written > 0)
			iov_iter_advance(from, written);
		ceph_put_snap_context(snapc);
	} else {
		/*
		 * No need to acquire the i_truncate_mutex. Because
		 * the MDS revokes Fwb caps before sending truncate
		 * message to us. We can't get Fwb cap while there is
		 * a pending vmtruncate, so write and vmtruncate can
		 * not run at the same time.
		 */
		written = generic_perform_write(iocb, from);
		if (likely(written >= 0))
			iocb->ki_pos = pos + written;
		ceph_end_io_write(inode);
	}

	if (written >= 0) {
		int dirty;

		spin_lock(&ci->i_ceph_lock);
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
		if (ceph_quota_is_max_bytes_approaching(inode, iocb->ki_pos))
			ceph_check_caps(ci, 0, NULL);
	}

	dout("aio_write %p %llx.%llx %llu~%u dropping cap refs on %s\n",
	     inode, ceph_vinop(inode), pos, (unsigned)count,
	     ceph_cap_string(got));
	ceph_put_cap_refs(ci, got);

	if (written == -EOLDSNAPC) {
		dout("aio_write %p %llx.%llx %llu~%u got EOLDSNAPC, retrying\n",
		     inode, ceph_vinop(inode), pos, (unsigned)count);
		goto retry_snap;
	}

	if (written >= 0) {
		if ((map_flags & CEPH_OSDMAP_NEARFULL) ||
		    (pool_flags & CEPH_POOL_FLAG_NEARFULL))
			iocb->ki_flags |= IOCB_DSYNC;
		written = generic_write_sync(iocb, written);
	}

	goto out_unlocked;
out_caps:
	ceph_put_cap_refs(ci, got);
out:
	if (direct_lock)
		ceph_end_io_direct(inode);
	else
		ceph_end_io_write(inode);
out_unlocked:
	ceph_free_cap_flush(prealloc_cf);
	current->backing_dev_info = NULL;
	return written ? written : err;
}
/*
 * llseek.  be sure to verify file size on SEEK_END.
 */
static loff_t ceph_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	loff_t i_size;
	loff_t ret;

	inode_lock(inode);

	if (whence == SEEK_END || whence == SEEK_DATA || whence == SEEK_HOLE) {
		ret = ceph_do_getattr(inode, CEPH_STAT_CAP_SIZE, false);
		if (ret < 0)
			goto out;
	}

	i_size = i_size_read(inode);
	switch (whence) {
	case SEEK_END:
		offset += i_size;
		break;
	case SEEK_CUR:
		/*
		 * Here we special-case the lseek(fd, 0, SEEK_CUR)
		 * position-querying operation.  Avoid rewriting the "same"
		 * f_pos value back to the file because a concurrent read(),
		 * write() or lseek() might have altered it
		 */
		if (offset == 0) {
			ret = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
	case SEEK_DATA:
		if (offset < 0 || offset >= i_size) {
			ret = -ENXIO;
			goto out;
		}
		break;
	case SEEK_HOLE:
		if (offset < 0 || offset >= i_size) {
			ret = -ENXIO;
			goto out;
		}
		offset = i_size;
		break;
	}

	ret = vfs_setpos(file, offset, max(i_size, fsc->max_file_size));

out:
	inode_unlock(inode);
	return ret;
}
static inline void ceph_zero_partial_page(
	struct inode *inode, loff_t offset, unsigned size)
{
	struct page *page;
	pgoff_t index = offset >> PAGE_SHIFT;

	page = find_lock_page(inode->i_mapping, index);
	if (page) {
		wait_on_page_writeback(page);
		zero_user(page, offset & (PAGE_SIZE - 1), size);
		unlock_page(page);
		put_page(page);
	}
}

static void ceph_zero_pagecache_range(struct inode *inode, loff_t offset,
				      loff_t length)
{
	loff_t nearly = round_up(offset, PAGE_SIZE);
	if (offset < nearly) {
		loff_t size = nearly - offset;
		if (length < size)
			size = length;
		ceph_zero_partial_page(inode, offset, size);
		offset += size;
		length -= size;
	}
	if (length >= PAGE_SIZE) {
		loff_t size = round_down(length, PAGE_SIZE);
		truncate_pagecache_range(inode, offset, offset + size - 1);
		offset += size;
		length -= size;
	}
	if (length)
		ceph_zero_partial_page(inode, offset, length);
}
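/*
 * Worked example with 4K pages: offset = 1000, length = 10000.  The
 * head partial page zeroes bytes 1000..4095 (3096 bytes), the middle
 * round_down(6904, PAGE_SIZE) = 4096 bytes are dropped with
 * truncate_pagecache_range(), and the remaining 2808 bytes are zeroed
 * in the tail partial page.
 */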
static int ceph_zero_partial_object(struct inode *inode,
				    loff_t offset, loff_t *length)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
	struct ceph_osd_request *req;
	int ret = 0;
	loff_t zero = 0;
	int op;

	if (!length) {
		op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
		length = &zero;
	} else {
		op = CEPH_OSD_OP_ZERO;
	}

	req = ceph_osdc_new_request(&fsc->client->osdc, &ci->i_layout,
					ceph_vino(inode),
					offset, length,
					0, 1, op,
					CEPH_OSD_FLAG_WRITE,
					NULL, 0, 0, false);
	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
		goto out;
	}

	req->r_mtime = inode->i_mtime;
	ret = ceph_osdc_start_request(&fsc->client->osdc, req, false);
	if (!ret) {
		ret = ceph_osdc_wait_request(&fsc->client->osdc, req);
		if (ret == -ENOENT)
			ret = 0;
	}
	ceph_osdc_put_request(req);

out:
	return ret;
}
static int ceph_zero_objects(struct inode *inode, loff_t offset, loff_t length)
{
	int ret = 0;
	struct ceph_inode_info *ci = ceph_inode(inode);
	s32 stripe_unit = ci->i_layout.stripe_unit;
	s32 stripe_count = ci->i_layout.stripe_count;
	s32 object_size = ci->i_layout.object_size;
	u64 object_set_size = object_size * stripe_count;
	u64 nearly, t;

	/* round offset up to next period boundary */
	nearly = offset + object_set_size - 1;
	t = nearly;
	nearly -= do_div(t, object_set_size);
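	/*
	 * Example: object_size = 4M and stripe_count = 3 give an
	 * object_set_size (one stripe period) of 12M, so an offset of
	 * 5M rounds up to the next period boundary at 12M.
	 */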
	while (length && offset < nearly) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
		if (ret < 0)
			return ret;
		offset += size;
		length -= size;
	}
	while (length >= object_set_size) {
		int i;
		loff_t pos = offset;
		for (i = 0; i < stripe_count; ++i) {
			ret = ceph_zero_partial_object(inode, pos, NULL);
			if (ret < 0)
				return ret;
			pos += stripe_unit;
		}
		offset += object_set_size;
		length -= object_set_size;
	}
	if (length) {
		loff_t size = length;
		ret = ceph_zero_partial_object(inode, offset, &size);
	}
	return ret;
}
static long ceph_fallocate(struct file *file, int mode,
				loff_t offset, loff_t length)
{
	struct ceph_file_info *fi = file->private_data;
	struct inode *inode = file_inode(file);
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_cap_flush *prealloc_cf;
	int want, got = 0;
	int dirty;
	int ret = 0;
	loff_t endoff = 0;
	loff_t size;

	if (mode != (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (!S_ISREG(inode->i_mode))
		return -EOPNOTSUPP;

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	inode_lock(inode);

	if (ceph_snap(inode) != CEPH_NOSNAP) {
		ret = -EROFS;
		goto unlock;
	}

	size = i_size_read(inode);

	/* Are we punching a hole beyond EOF? */
	if (offset >= size)
		goto unlock;
	if ((offset + length) > size)
		length = size - offset;

	if (fi->fmode & CEPH_FILE_MODE_LAZY)
		want = CEPH_CAP_FILE_BUFFER | CEPH_CAP_FILE_LAZYIO;
	else
		want = CEPH_CAP_FILE_BUFFER;

	ret = ceph_get_caps(file, CEPH_CAP_FILE_WR, want, endoff, &got);
	if (ret < 0)
		goto unlock;

	filemap_invalidate_lock(inode->i_mapping);
	ceph_fscache_invalidate(inode, false);
	ceph_zero_pagecache_range(inode, offset, length);
	ret = ceph_zero_objects(inode, offset, length);

	if (!ret) {
		spin_lock(&ci->i_ceph_lock);
		dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR,
					       &prealloc_cf);
		spin_unlock(&ci->i_ceph_lock);
		if (dirty)
			__mark_inode_dirty(inode, dirty);
	}
	filemap_invalidate_unlock(inode->i_mapping);

	ceph_put_cap_refs(ci, got);
unlock:
	inode_unlock(inode);
	ceph_free_cap_flush(prealloc_cf);
	return ret;
}
/*
 * This function tries to get FILE_WR capabilities for dst_ci and FILE_RD for
 * src_ci.  Two attempts are made to obtain both caps, and an error is
 * returned if this fails; zero is returned on success.
 */
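/*
 * Order of operations (sketch): Fw on the dst file is taken first with
 * a blocking ceph_get_caps(), then Fr on the src file is only *tried*.
 * On failure the dst caps are dropped, src Fr is taken blocking and
 * dropped again, and the whole sequence is retried from the top, so we
 * never block on one file's caps while holding the other's.
 */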
static int get_rd_wr_caps(struct file *src_filp, int *src_got,
			  struct file *dst_filp,
			  loff_t dst_endoff, int *dst_got)
{
	int ret = 0;
	bool retrying = false;

retry_caps:
	ret = ceph_get_caps(dst_filp, CEPH_CAP_FILE_WR, CEPH_CAP_FILE_BUFFER,
			    dst_endoff, dst_got);
	if (ret < 0)
		return ret;

	/*
	 * Since we're already holding the FILE_WR capability for the dst file,
	 * we would risk a deadlock by using ceph_get_caps.  Thus, we'll do some
	 * retry dance instead to try to get both capabilities.
	 */
	ret = ceph_try_get_caps(file_inode(src_filp),
				CEPH_CAP_FILE_RD, CEPH_CAP_FILE_SHARED,
				false, src_got);
	if (ret <= 0) {
		/* Start by dropping dst_ci caps and getting src_ci caps */
		ceph_put_cap_refs(ceph_inode(file_inode(dst_filp)), *dst_got);
		if (retrying) {
			if (!ret)
				/* ceph_try_get_caps masks EAGAIN */
				ret = -EAGAIN;
			return ret;
		}
		ret = ceph_get_caps(src_filp, CEPH_CAP_FILE_RD,
				    CEPH_CAP_FILE_SHARED, -1, src_got);
		if (ret < 0)
			return ret;
		/* ... drop src_ci caps too, and retry */
		ceph_put_cap_refs(ceph_inode(file_inode(src_filp)), *src_got);
		retrying = true;
		goto retry_caps;
	}
	return ret;
}
static void put_rd_wr_caps(struct ceph_inode_info *src_ci, int src_got,
			   struct ceph_inode_info *dst_ci, int dst_got)
{
	ceph_put_cap_refs(src_ci, src_got);
	ceph_put_cap_refs(dst_ci, dst_got);
}
/*
 * This function does several size-related checks, returning an error if:
 *  - source file is smaller than off+len
 *  - destination file size is not OK (inode_newsize_ok())
 *  - the max bytes quota is exceeded
 */
static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode,
			   loff_t src_off, loff_t dst_off, size_t len)
{
	loff_t size, endoff;

	size = i_size_read(src_inode);
	/*
	 * Don't copy beyond source file EOF.  Instead of simply setting length
	 * to (size - src_off), just drop to VFS default implementation, as the
	 * local i_size may be stale due to other clients writing to the source
	 * inode.
	 */
	if (src_off + len > size) {
		dout("Copy beyond EOF (%llu + %zu > %llu)\n",
		     src_off, len, size);
		return -EOPNOTSUPP;
	}
	size = i_size_read(dst_inode);

	endoff = dst_off + len;
	if (inode_newsize_ok(dst_inode, endoff))
		return -EOPNOTSUPP;

	if (ceph_quota_is_max_bytes_exceeded(dst_inode, endoff))
		return -EDQUOT;

	return 0;
}
static struct ceph_osd_request *
ceph_alloc_copyfrom_request(struct ceph_osd_client *osdc,
			    u64 src_snapid,
			    struct ceph_object_id *src_oid,
			    struct ceph_object_locator *src_oloc,
			    struct ceph_object_id *dst_oid,
			    struct ceph_object_locator *dst_oloc,
			    u32 truncate_seq, u64 truncate_size)
{
	struct ceph_osd_request *req;
	int ret;
	u32 src_fadvise_flags =
		CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
		CEPH_OSD_OP_FLAG_FADVISE_NOCACHE;
	u32 dst_fadvise_flags =
		CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
		CEPH_OSD_OP_FLAG_FADVISE_DONTNEED;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
	if (!req)
		return ERR_PTR(-ENOMEM);

	req->r_flags = CEPH_OSD_FLAG_WRITE;

	ceph_oloc_copy(&req->r_t.base_oloc, dst_oloc);
	ceph_oid_copy(&req->r_t.base_oid, dst_oid);

	ret = osd_req_op_copy_from_init(req, src_snapid, 0,
					src_oid, src_oloc,
					src_fadvise_flags,
					dst_fadvise_flags,
					truncate_seq,
					truncate_size,
					CEPH_OSD_COPY_FROM_FLAG_TRUNCATE_SEQ);
	if (ret)
		goto out;

	ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
	if (ret)
		goto out;

	return req;

out:
	ceph_osdc_put_request(req);
	return ERR_PTR(ret);
}
static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off,
				    struct ceph_inode_info *dst_ci, u64 *dst_off,
				    struct ceph_fs_client *fsc,
				    size_t len, unsigned int flags)
{
	struct ceph_object_locator src_oloc, dst_oloc;
	struct ceph_object_id src_oid, dst_oid;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *req;
	size_t bytes = 0;
	u64 src_objnum, src_objoff, dst_objnum, dst_objoff;
	u32 src_objlen, dst_objlen;
	u32 object_size = src_ci->i_layout.object_size;
	int ret;

	src_oloc.pool = src_ci->i_layout.pool_id;
	src_oloc.pool_ns = ceph_try_get_string(src_ci->i_layout.pool_ns);
	dst_oloc.pool = dst_ci->i_layout.pool_id;
	dst_oloc.pool_ns = ceph_try_get_string(dst_ci->i_layout.pool_ns);
	osdc = &fsc->client->osdc;

	while (len >= object_size) {
		ceph_calc_file_object_mapping(&src_ci->i_layout, *src_off,
					      object_size, &src_objnum,
					      &src_objoff, &src_objlen);
		ceph_calc_file_object_mapping(&dst_ci->i_layout, *dst_off,
					      object_size, &dst_objnum,
					      &dst_objoff, &dst_objlen);
		ceph_oid_init(&src_oid);
		ceph_oid_printf(&src_oid, "%llx.%08llx",
				src_ci->i_vino.ino, src_objnum);
		ceph_oid_init(&dst_oid);
		ceph_oid_printf(&dst_oid, "%llx.%08llx",
				dst_ci->i_vino.ino, dst_objnum);
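		/*
		 * Object names follow the "<ino hex>.<objnum>" pattern
		 * built above, e.g. inode 0x10000003a2, object 2 yields
		 * "10000003a2.00000002".
		 */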
		/* Do an object remote copy */
		req = ceph_alloc_copyfrom_request(osdc, src_ci->i_vino.snap,
						  &src_oid, &src_oloc,
						  &dst_oid, &dst_oloc,
						  dst_ci->i_truncate_seq,
						  dst_ci->i_truncate_size);
		if (IS_ERR(req)) {
			ret = PTR_ERR(req);
		} else {
			ceph_osdc_start_request(osdc, req, false);
			ret = ceph_osdc_wait_request(osdc, req);
			ceph_update_copyfrom_metrics(&fsc->mdsc->metric,
						     req->r_start_latency,
						     req->r_end_latency,
						     object_size, ret);
			ceph_osdc_put_request(req);
		}
		if (ret) {
			if (ret == -EOPNOTSUPP) {
				fsc->have_copy_from2 = false;
				pr_notice("OSDs don't support copy-from2; disabling copy offload\n");
			}
			dout("ceph_osdc_copy_from returned %d\n", ret);
			if (!bytes)
				bytes = ret;
			goto out;
		}
		len -= object_size;
		bytes += object_size;
		*src_off += object_size;
		*dst_off += object_size;
	}

out:
	ceph_oloc_destroy(&src_oloc);
	ceph_oloc_destroy(&dst_oloc);
	return bytes;
}
static ssize_t __ceph_copy_file_range(struct file *src_file, loff_t src_off,
				      struct file *dst_file, loff_t dst_off,
				      size_t len, unsigned int flags)
{
	struct inode *src_inode = file_inode(src_file);
	struct inode *dst_inode = file_inode(dst_file);
	struct ceph_inode_info *src_ci = ceph_inode(src_inode);
	struct ceph_inode_info *dst_ci = ceph_inode(dst_inode);
	struct ceph_cap_flush *prealloc_cf;
	struct ceph_fs_client *src_fsc = ceph_inode_to_client(src_inode);
	loff_t size;
	ssize_t ret = -EIO, bytes;
	u64 src_objnum, dst_objnum, src_objoff, dst_objoff;
	u32 src_objlen, dst_objlen;
	int src_got = 0, dst_got = 0, err, dirty;

	if (src_inode->i_sb != dst_inode->i_sb) {
		struct ceph_fs_client *dst_fsc = ceph_inode_to_client(dst_inode);

		if (ceph_fsid_compare(&src_fsc->client->fsid,
				      &dst_fsc->client->fsid)) {
			dout("Copying files across clusters: src: %pU dst: %pU\n",
			     &src_fsc->client->fsid, &dst_fsc->client->fsid);
			return -EXDEV;
		}
	}
	if (ceph_snap(dst_inode) != CEPH_NOSNAP)
		return -EROFS;

	/*
	 * Some of the checks below will return -EOPNOTSUPP, which will force a
	 * fallback to the default VFS copy_file_range implementation.  This is
	 * desirable in several cases (for ex, the 'len' is smaller than the
	 * size of the objects, or in cases where that would be more
	 * efficient).
	 */

	if (ceph_test_mount_opt(src_fsc, NOCOPYFROM))
		return -EOPNOTSUPP;

	if (!src_fsc->have_copy_from2)
		return -EOPNOTSUPP;

	/*
	 * Striped file layouts require that we copy partial objects, but the
	 * OSD copy-from operation only supports full-object copies.  Limit
	 * this to non-striped file layouts for now.
	 */
	if ((src_ci->i_layout.stripe_unit != dst_ci->i_layout.stripe_unit) ||
	    (src_ci->i_layout.stripe_count != 1) ||
	    (dst_ci->i_layout.stripe_count != 1) ||
	    (src_ci->i_layout.object_size != dst_ci->i_layout.object_size)) {
		dout("Invalid src/dst files layout\n");
		return -EOPNOTSUPP;
	}

	if (len < src_ci->i_layout.object_size)
		return -EOPNOTSUPP; /* no remote copy will be done */

	prealloc_cf = ceph_alloc_cap_flush();
	if (!prealloc_cf)
		return -ENOMEM;

	/* Start by sync'ing the source and destination files */
	ret = file_write_and_wait_range(src_file, src_off, (src_off + len));
	if (ret < 0) {
		dout("failed to write src file (%zd)\n", ret);
		goto out;
	}
	ret = file_write_and_wait_range(dst_file, dst_off, (dst_off + len));
	if (ret < 0) {
		dout("failed to write dst file (%zd)\n", ret);
		goto out;
	}

	/*
	 * We need FILE_WR caps for dst_ci and FILE_RD for src_ci as other
	 * clients may have dirty data in their caches.  And OSDs know nothing
	 * about caps, so they can't safely do the remote object copies.
	 */
	err = get_rd_wr_caps(src_file, &src_got,
			     dst_file, (dst_off + len), &dst_got);
	if (err < 0) {
		dout("get_rd_wr_caps returned %d\n", err);
		ret = -EOPNOTSUPP;
		goto out;
	}

	ret = is_file_size_ok(src_inode, dst_inode, src_off, dst_off, len);
	if (ret < 0)
		goto out_caps;

	/* Drop dst file cached pages */
	ceph_fscache_invalidate(dst_inode, false);
	ret = invalidate_inode_pages2_range(dst_inode->i_mapping,
					    dst_off >> PAGE_SHIFT,
					    (dst_off + len) >> PAGE_SHIFT);
	if (ret < 0) {
		dout("Failed to invalidate inode pages (%zd)\n", ret);
		ret = 0; /* XXX */
	}
	ceph_calc_file_object_mapping(&src_ci->i_layout, src_off,
				      src_ci->i_layout.object_size,
				      &src_objnum, &src_objoff, &src_objlen);
	ceph_calc_file_object_mapping(&dst_ci->i_layout, dst_off,
				      dst_ci->i_layout.object_size,
				      &dst_objnum, &dst_objoff, &dst_objlen);
	/* object-level offsets need to be the same */
	if (src_objoff != dst_objoff) {
		ret = -EOPNOTSUPP;
		goto out_caps;
	}

	/*
	 * Do a manual copy if the object offset isn't object aligned.
	 * 'src_objlen' contains the bytes left until the end of the object,
	 * starting at the src_off
	 */
	if (src_objoff) {
		dout("Initial partial copy of %u bytes\n", src_objlen);

		/*
		 * we need to temporarily drop all caps as we'll be calling
		 * {read,write}_iter, which will get caps again.
		 */
		put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);
		ret = do_splice_direct(src_file, &src_off, dst_file,
				       &dst_off, src_objlen, flags);
		/* Abort on short copies or on error */
		if (ret < src_objlen) {
			dout("Failed partial copy (%zd)\n", ret);
			goto out;
		}
		len -= ret;
		err = get_rd_wr_caps(src_file, &src_got,
				     dst_file, (dst_off + len), &dst_got);
		if (err < 0)
			goto out;
		err = is_file_size_ok(src_inode, dst_inode,
				      src_off, dst_off, len);
		if (err < 0)
			goto out_caps;
	}

	size = i_size_read(dst_inode);
	bytes = ceph_do_objects_copy(src_ci, &src_off, dst_ci, &dst_off,
				     src_fsc, len, flags);
	if (bytes <= 0) {
		if (!ret)
			ret = bytes;
		goto out_caps;
	}
	dout("Copied %zu bytes out of %zu\n", bytes, len);
	len -= bytes;
	ret += bytes;

	file_update_time(dst_file);
	inode_inc_iversion_raw(dst_inode);

	if (dst_off > size) {
		/* Let the MDS know about dst file size change */
		if (ceph_inode_set_size(dst_inode, dst_off) ||
		    ceph_quota_is_max_bytes_approaching(dst_inode, dst_off))
			ceph_check_caps(dst_ci, CHECK_CAPS_AUTHONLY, NULL);
	}
	/* Mark Fw dirty */
	spin_lock(&dst_ci->i_ceph_lock);
	dirty = __ceph_mark_dirty_caps(dst_ci, CEPH_CAP_FILE_WR, &prealloc_cf);
	spin_unlock(&dst_ci->i_ceph_lock);
	if (dirty)
		__mark_inode_dirty(dst_inode, dirty);

out_caps:
	put_rd_wr_caps(src_ci, src_got, dst_ci, dst_got);

	/*
	 * Do the final manual copy if we still have some bytes left, unless
	 * there were errors in remote object copies (len >= object_size).
	 */
	if (len && (len < src_ci->i_layout.object_size)) {
		dout("Final partial copy of %zu bytes\n", len);
		bytes = do_splice_direct(src_file, &src_off, dst_file,
					 &dst_off, len, flags);
		if (bytes > 0)
			ret += bytes;
		else
			dout("Failed partial copy (%zd)\n", bytes);
	}

out:
	ceph_free_cap_flush(prealloc_cf);

	return ret;
}
static ssize_t ceph_copy_file_range(struct file *src_file, loff_t src_off,
				    struct file *dst_file, loff_t dst_off,
				    size_t len, unsigned int flags)
{
	ssize_t ret;

	ret = __ceph_copy_file_range(src_file, src_off, dst_file, dst_off,
				     len, flags);

	if (ret == -EOPNOTSUPP || ret == -EXDEV)
		ret = generic_copy_file_range(src_file, src_off, dst_file,
					      dst_off, len, flags);
	return ret;
}
const struct file_operations ceph_file_fops = {
	.open = ceph_open,
	.release = ceph_release,
	.llseek = ceph_llseek,
	.read_iter = ceph_read_iter,
	.write_iter = ceph_write_iter,
	.mmap = ceph_mmap,
	.fsync = ceph_fsync,
	.lock = ceph_lock,
	.setlease = simple_nosetlease,
	.flock = ceph_flock,
	.splice_read = generic_file_splice_read,
	.splice_write = iter_file_splice_write,
	.unlocked_ioctl = ceph_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.fallocate	= ceph_fallocate,
	.copy_file_range = ceph_copy_file_range,
};