/*
 * vfs operations that deal with files
 *
 * Copyright (C) International Business Machines Corp., 2002,2010
 * Author(s): Steve French (sfrench@us.ibm.com)
 *            Jeremy Allison (jra@samba.org)
 *
 * This library is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Lesser General Public License as published
 * by the Free Software Foundation; either version 2.1 of the License, or
 * (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 * the GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/writeback.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/delay.h>
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <asm/div64.h>
#include "cifsfs.h"
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "cifs_fs_sb.h"
#include "fscache.h"
static inline int cifs_convert_flags(unsigned int flags)
{
	if ((flags & O_ACCMODE) == O_RDONLY)
		return GENERIC_READ;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		return GENERIC_WRITE;
	else if ((flags & O_ACCMODE) == O_RDWR) {
		/* GENERIC_ALL is too much permission to request; it can
		   cause unnecessary access-denied errors on create */
		/* return GENERIC_ALL; */
		return (GENERIC_READ | GENERIC_WRITE);
	}

	return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
		FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
		FILE_READ_DATA);
}
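/*
 * Illustrative sketch (added commentary, not from the original source):
 * how the access-mode bits of open(2) flags map to SMB desired-access
 * masks, assuming the usual O_ACCMODE values.
 *
 *	cifs_convert_flags(O_RDONLY);	returns GENERIC_READ
 *	cifs_convert_flags(O_WRONLY);	returns GENERIC_WRITE
 *	cifs_convert_flags(O_RDWR);	returns GENERIC_READ | GENERIC_WRITE
 *
 * Anything else falls through to the explicit FILE_* mask above.
 */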
static u32 cifs_posix_convert_flags(unsigned int flags)
{
	u32 posix_flags = 0;

	if ((flags & O_ACCMODE) == O_RDONLY)
		posix_flags = SMB_O_RDONLY;
	else if ((flags & O_ACCMODE) == O_WRONLY)
		posix_flags = SMB_O_WRONLY;
	else if ((flags & O_ACCMODE) == O_RDWR)
		posix_flags = SMB_O_RDWR;

	if (flags & O_CREAT) {
		posix_flags |= SMB_O_CREAT;
		if (flags & O_EXCL)
			posix_flags |= SMB_O_EXCL;
	} else if (flags & O_EXCL)
		cifs_dbg(FYI, "Application %s pid %d has incorrectly set O_EXCL flag but not O_CREAT on file open. Ignoring O_EXCL\n",
			 current->comm, current->tgid);

	if (flags & O_TRUNC)
		posix_flags |= SMB_O_TRUNC;
	/* be safe and imply O_SYNC for O_DSYNC */
	if (flags & O_DSYNC)
		posix_flags |= SMB_O_SYNC;
	if (flags & O_DIRECTORY)
		posix_flags |= SMB_O_DIRECTORY;
	if (flags & O_NOFOLLOW)
		posix_flags |= SMB_O_NOFOLLOW;
	if (flags & O_DIRECT)
		posix_flags |= SMB_O_DIRECT;

	return posix_flags;
}
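/*
 * Illustrative example (added commentary): a typical create-for-write
 * open translates one-to-one into SMB unix-extension flags:
 *
 *	cifs_posix_convert_flags(O_WRONLY | O_CREAT | O_EXCL);
 *		== SMB_O_WRONLY | SMB_O_CREAT | SMB_O_EXCL
 */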
static inline int cifs_get_disposition(unsigned int flags)
{
	if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
		return FILE_CREATE;
	else if ((flags & (O_CREAT | O_TRUNC)) == (O_CREAT | O_TRUNC))
		return FILE_OVERWRITE_IF;
	else if ((flags & O_CREAT) == O_CREAT)
		return FILE_OPEN_IF;
	else if ((flags & O_TRUNC) == O_TRUNC)
		return FILE_OVERWRITE;
	else
		return FILE_OPEN;
}
int cifs_posix_open(char *full_path, struct inode **pinode,
		    struct super_block *sb, int mode, unsigned int f_flags,
		    __u32 *poplock, __u16 *pnetfid, unsigned int xid)
{
	int rc;
	FILE_UNIX_BASIC_INFO *presp_data;
	__u32 posix_flags = 0;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifs_fattr fattr;
	struct tcon_link *tlink;
	struct cifs_tcon *tcon;

	cifs_dbg(FYI, "posix open %s\n", full_path);

	presp_data = kzalloc(sizeof(FILE_UNIX_BASIC_INFO), GFP_KERNEL);
	if (presp_data == NULL)
		return -ENOMEM;

	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		rc = PTR_ERR(tlink);
		goto posix_open_ret;
	}

	tcon = tlink_tcon(tlink);
	mode &= ~current_umask();

	posix_flags = cifs_posix_convert_flags(f_flags);
	rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
			     poplock, full_path, cifs_sb->local_nls,
			     cifs_remap(cifs_sb));
	cifs_put_tlink(tlink);

	if (rc)
		goto posix_open_ret;

	if (presp_data->Type == cpu_to_le32(-1))
		goto posix_open_ret; /* open ok, caller does qpathinfo */

	if (!pinode)
		goto posix_open_ret; /* caller does not need info */

	cifs_unix_basic_to_fattr(&fattr, presp_data, cifs_sb);

	/* get new inode and set it up */
	if (*pinode == NULL) {
		cifs_fill_uniqueid(sb, &fattr);
		*pinode = cifs_iget(sb, &fattr);
		if (!*pinode) {
			rc = -ENOMEM;
			goto posix_open_ret;
		}
	} else {
		cifs_revalidate_mapping(*pinode);
		cifs_fattr_to_inode(*pinode, &fattr);
	}

posix_open_ret:
	kfree(presp_data);
	return rc;
}
static int
cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
	     struct cifs_tcon *tcon, unsigned int f_flags, __u32 *oplock,
	     struct cifs_fid *fid, unsigned int xid)
{
	int rc;
	int desired_access;
	int disposition;
	int create_options = CREATE_NOT_DIR;
	FILE_ALL_INFO *buf;
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifs_open_parms oparms;

	if (!server->ops->open)
		return -ENOSYS;

	desired_access = cifs_convert_flags(f_flags);

/*********************************************************************
 *  open flag mapping table:
 *
 *	POSIX Flag            CIFS Disposition
 *	----------            ----------------
 *	O_CREAT               FILE_OPEN_IF
 *	O_CREAT | O_EXCL      FILE_CREATE
 *	O_CREAT | O_TRUNC     FILE_OVERWRITE_IF
 *	O_TRUNC               FILE_OVERWRITE
 *	none of the above     FILE_OPEN
 *
 *	Note that there is not a direct match for the disposition
 *	FILE_SUPERSEDE (ie create whether or not the file exists);
 *	O_CREAT | O_TRUNC is similar, but it truncates the existing
 *	file rather than creating a new file as FILE_SUPERSEDE does
 *	(which uses the attributes / metadata passed in on the open call).
 *
 *?  O_SYNC is a reasonable match to the CIFS writethrough flag,
 *?  and the read/write flags match reasonably.  O_LARGEFILE is
 *?  irrelevant because largefile support is always used by this
 *?  client.  Flags O_APPEND, O_DIRECT, O_DIRECTORY, O_FASYNC,
 *?  O_NOFOLLOW, and O_NONBLOCK need further investigation.
 *********************************************************************/
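	/*
	 * Worked example (illustrative only): an open(2) with
	 * O_RDWR | O_CREAT | O_TRUNC becomes
	 *
	 *	desired_access = GENERIC_READ | GENERIC_WRITE
	 *	disposition    = FILE_OVERWRITE_IF
	 *
	 * i.e. "open read/write; create if missing, truncate if present".
	 */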
	disposition = cifs_get_disposition(f_flags);

	/* BB pass O_SYNC flag through on file attributes .. BB */

	buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = fid;
	oparms.reconnect = false;

	rc = server->ops->open(xid, &oparms, oplock, buf);

	if (rc)
		goto out;

	if (tcon->unix_ext)
		rc = cifs_get_inode_info_unix(&inode, full_path, inode->i_sb,
					      xid);
	else
		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
					 xid, fid);

	if (rc) {
		server->ops->close(xid, tcon, fid);
		if (rc == -ESTALE)
			rc = -EOPENSTALE;
	}

out:
	kfree(buf);
	return rc;
}
static bool
cifs_has_mand_locks(struct cifsInodeInfo *cinode)
{
	struct cifs_fid_locks *cur;
	bool has_locks = false;

	down_read(&cinode->lock_sem);
	list_for_each_entry(cur, &cinode->llist, llist) {
		if (!list_empty(&cur->locks)) {
			has_locks = true;
			break;
		}
	}
	up_read(&cinode->lock_sem);
	return has_locks;
}
void
cifs_down_write(struct rw_semaphore *sem)
{
	while (!down_write_trylock(sem))
		msleep(10);
}
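/*
 * Note (added commentary, not from the original source): taking the
 * semaphore with a trylock-plus-sleep loop instead of a plain
 * down_write() keeps a queued writer from blocking new readers of
 * lock_sem, at the cost of polling every 10ms while contended.
 */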
struct cifsFileInfo *
cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
		  struct tcon_link *tlink, __u32 oplock)
{
	struct dentry *dentry = file_dentry(file);
	struct inode *inode = d_inode(dentry);
	struct cifsInodeInfo *cinode = CIFS_I(inode);
	struct cifsFileInfo *cfile;
	struct cifs_fid_locks *fdlocks;
	struct cifs_tcon *tcon = tlink_tcon(tlink);
	struct TCP_Server_Info *server = tcon->ses->server;

	cfile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
	if (cfile == NULL)
		return cfile;

	fdlocks = kzalloc(sizeof(struct cifs_fid_locks), GFP_KERNEL);
	if (!fdlocks) {
		kfree(cfile);
		return NULL;
	}

	INIT_LIST_HEAD(&fdlocks->locks);
	fdlocks->cfile = cfile;
	cfile->llist = fdlocks;

	cfile->count = 1;
	cfile->pid = current->tgid;
	cfile->uid = current_fsuid();
	cfile->dentry = dget(dentry);
	cfile->f_flags = file->f_flags;
	cfile->invalidHandle = false;
	cfile->tlink = cifs_get_tlink(tlink);
	INIT_WORK(&cfile->oplock_break, cifs_oplock_break);
	mutex_init(&cfile->fh_mutex);
	spin_lock_init(&cfile->file_info_lock);

	cifs_sb_active(inode->i_sb);

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	cifs_down_write(&cinode->lock_sem);
	list_add(&fdlocks->llist, &cinode->llist);
	up_write(&cinode->lock_sem);

	spin_lock(&tcon->open_file_lock);
	if (fid->pending_open->oplock != CIFS_OPLOCK_NO_CHANGE && oplock)
		oplock = fid->pending_open->oplock;
	list_del(&fid->pending_open->olist);

	fid->purge_cache = false;
	server->ops->set_fid(cfile, fid, oplock);

	list_add(&cfile->tlist, &tcon->openFileList);

	/* if readable file instance, put it first in the list */
	if (file->f_mode & FMODE_READ)
		list_add(&cfile->flist, &cinode->openFileList);
	else
		list_add_tail(&cfile->flist, &cinode->openFileList);
	spin_unlock(&tcon->open_file_lock);

	if (fid->purge_cache)
		cifs_zap_mapping(inode);

	file->private_data = cfile;
	return cfile;
}
struct cifsFileInfo *
cifsFileInfo_get(struct cifsFileInfo *cifs_file)
{
	spin_lock(&cifs_file->file_info_lock);
	cifsFileInfo_get_locked(cifs_file);
	spin_unlock(&cifs_file->file_info_lock);
	return cifs_file;
}
/**
 * cifsFileInfo_put - release a reference of file priv data
 *
 * Always potentially wait for oplock handler. See _cifsFileInfo_put().
 */
void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
{
	_cifsFileInfo_put(cifs_file, true);
}
/**
 * _cifsFileInfo_put - release a reference of file priv data
 *
 * This may involve closing the filehandle @cifs_file out on the
 * server. Must be called without holding tcon->open_file_lock and
 * cifs_file->file_info_lock.
 *
 * If @wait_oplock_handler is true and we are releasing the last
 * reference, wait for any running oplock break handler of the file
 * and cancel any pending one. If calling this function from the
 * oplock break handler, you need to pass false.
 */
void _cifsFileInfo_put(struct cifsFileInfo *cifs_file, bool wait_oplock_handler)
{
	struct inode *inode = d_inode(cifs_file->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct cifsInodeInfo *cifsi = CIFS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
	struct cifsLockInfo *li, *tmp;
	struct cifs_fid fid;
	struct cifs_pending_open open;
	bool oplock_break_cancelled;

	spin_lock(&tcon->open_file_lock);

	spin_lock(&cifs_file->file_info_lock);
	if (--cifs_file->count > 0) {
		spin_unlock(&cifs_file->file_info_lock);
		spin_unlock(&tcon->open_file_lock);
		return;
	}
	spin_unlock(&cifs_file->file_info_lock);

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	/* store open in pending opens to make sure we don't miss lease break */
	cifs_add_pending_open_locked(&fid, cifs_file->tlink, &open);

	/* remove it from the lists */
	list_del(&cifs_file->flist);
	list_del(&cifs_file->tlist);

	if (list_empty(&cifsi->openFileList)) {
		cifs_dbg(FYI, "closing last open instance for inode %p\n",
			 d_inode(cifs_file->dentry));
		/*
		 * In strict cache mode we need to invalidate the mapping on
		 * the last close because it may cause an error when we open
		 * this file again and get at least a level II oplock.
		 */
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
		cifs_set_oplock_level(cifsi, 0);
	}

	spin_unlock(&tcon->open_file_lock);

	oplock_break_cancelled = wait_oplock_handler ?
		cancel_work_sync(&cifs_file->oplock_break) : false;

	if (!tcon->need_reconnect && !cifs_file->invalidHandle) {
		struct TCP_Server_Info *server = tcon->ses->server;
		unsigned int xid;

		xid = get_xid();
		if (server->ops->close)
			server->ops->close(xid, tcon, &cifs_file->fid);
		_free_xid(xid);
	}

	if (oplock_break_cancelled)
		cifs_done_oplock_break(cifsi);

	cifs_del_pending_open(&open);

	/*
	 * Delete any outstanding lock records. We'll lose them when the file
	 * is closed anyway.
	 */
	cifs_down_write(&cifsi->lock_sem);
	list_for_each_entry_safe(li, tmp, &cifs_file->llist->locks, llist) {
		list_del(&li->llist);
		cifs_del_lock_waiters(li);
		kfree(li);
	}
	list_del(&cifs_file->llist->llist);
	kfree(cifs_file->llist);
	up_write(&cifsi->lock_sem);

	cifs_put_tlink(cifs_file->tlink);
	dput(cifs_file->dentry);
	cifs_sb_deactive(sb);
	kfree(cifs_file);
}
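/*
 * Illustrative usage sketch (added commentary, not from the original
 * source): the reference counting pairs a cifsFileInfo_get() with a
 * later put.
 *
 *	struct cifsFileInfo *cfile = cifsFileInfo_get(file->private_data);
 *	... use cfile safely; the handle cannot be freed underneath us ...
 *	cifsFileInfo_put(cfile);  // may close the handle on the server
 *
 * Only the final put tears the structure down; earlier puts just drop
 * the count under file_info_lock.
 */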
int cifs_open(struct inode *inode, struct file *file)

{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct TCP_Server_Info *server;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cifsFileInfo *cfile = NULL;
	char *full_path = NULL;
	bool posix_open_ok = false;
	struct cifs_fid fid;
	struct cifs_pending_open open;

	xid = get_xid();

	cifs_sb = CIFS_SB(inode->i_sb);
	tlink = cifs_sb_tlink(cifs_sb);
	if (IS_ERR(tlink)) {
		free_xid(xid);
		return PTR_ERR(tlink);
	}
	tcon = tlink_tcon(tlink);
	server = tcon->ses->server;

	full_path = build_path_from_dentry(file_dentry(file));
	if (full_path == NULL) {
		rc = -ENOMEM;
		goto out;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags are 0x%x for %s\n",
		 inode, file->f_flags, full_path);

	if (file->f_flags & O_DIRECT &&
	    cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO) {
		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
			file->f_op = &cifs_file_direct_nobrl_ops;
		else
			file->f_op = &cifs_file_direct_ops;
	}

	if (server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (!tcon->broken_posix_open && tcon->unix_ext &&
	    cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/* can not refresh inode info since size could be stale */
		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
				cifs_sb->mnt_file_mode /* ignored */,
				file->f_flags, &oplock, &fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix open succeeded\n");
			posix_open_ok = true;
		} else if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
			if (tcon->ses->serverNOS)
				cifs_dbg(VFS, "server %s of type %s returned unexpected error on SMB posix open, disabling posix open support. Check if server update available.\n",
					 tcon->ses->serverName,
					 tcon->ses->serverNOS);
			tcon->broken_posix_open = true;
		} else if ((rc != -EIO) && (rc != -EREMOTE) &&
			 (rc != -EOPNOTSUPP)) /* path not found or net err */
			goto out;
		/*
		 * Else fallthrough to retry open the old way on network i/o
		 * or DFS errors.
		 */
	}

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &fid);

	cifs_add_pending_open(&fid, tlink, &open);

	if (!posix_open_ok) {
		if (server->ops->get_lease_key)
			server->ops->get_lease_key(inode, &fid);

		rc = cifs_nt_open(full_path, inode, cifs_sb, tcon,
				  file->f_flags, &oplock, &fid, xid);
		if (rc) {
			cifs_del_pending_open(&open);
			goto out;
		}
	}

	cfile = cifs_new_fileinfo(&fid, file, tlink, oplock);
	if (cfile == NULL) {
		if (server->ops->close)
			server->ops->close(xid, tcon, &fid);
		cifs_del_pending_open(&open);
		rc = -ENOMEM;
		goto out;
	}

	cifs_fscache_set_inode_cookie(inode, file);

	if ((oplock & CIFS_CREATE_ACTION) && !posix_open_ok && tcon->unix_ext) {
		/*
		 * Time to set mode which we can not set earlier due to
		 * problems creating new read-only files.
		 */
		struct cifs_unix_set_info_args args = {
			.mode	= inode->i_mode,
			.uid	= INVALID_UID, /* no change */
			.gid	= INVALID_GID, /* no change */
			.ctime	= NO_CHANGE_64,
			.atime	= NO_CHANGE_64,
			.mtime	= NO_CHANGE_64,
			.device	= 0,
		};
		CIFSSMBUnixSetFileInfo(xid, tcon, &args, fid.netfid,
				       cfile->pid);
	}

out:
	kfree(full_path);
	free_xid(xid);
	cifs_put_tlink(tlink);
	return rc;
}
static int cifs_push_posix_locks(struct cifsFileInfo *cfile);

/*
 * Try to reacquire byte range locks that were released when session
 * to server was lost.
 */
static int
cifs_relock_file(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	down_read_nested(&cinode->lock_sem, SINGLE_DEPTH_NESTING);
	if (cinode->can_cache_brlcks) {
		/* can cache locks - no need to relock */
		up_read(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	up_read(&cinode->lock_sem);
	return rc;
}
static int
cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
{
	int rc = -EACCES;
	unsigned int xid;
	__u32 oplock;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	struct cifsInodeInfo *cinode;
	struct inode *inode;
	char *full_path = NULL;
	int desired_access;
	int disposition = FILE_OPEN;
	int create_options = CREATE_NOT_DIR;
	struct cifs_open_parms oparms;

	xid = get_xid();
	mutex_lock(&cfile->fh_mutex);
	if (!cfile->invalidHandle) {
		mutex_unlock(&cfile->fh_mutex);
		rc = 0;
		free_xid(xid);
		return rc;
	}

	inode = d_inode(cfile->dentry);
	cifs_sb = CIFS_SB(inode->i_sb);
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	/*
	 * Can not grab rename sem here because various ops, including those
	 * that already have the rename sem can end up causing writepage to get
	 * called and if the server was down that means we end up here, and we
	 * can never tell if the caller already has the rename_sem.
	 */
	full_path = build_path_from_dentry(cfile->dentry);
	if (full_path == NULL) {
		rc = -ENOMEM;
		mutex_unlock(&cfile->fh_mutex);
		free_xid(xid);
		return rc;
	}

	cifs_dbg(FYI, "inode = 0x%p file flags 0x%x for %s\n",
		 inode, cfile->f_flags, full_path);

	if (tcon->ses->server->oplocks)
		oplock = REQ_OPLOCK;
	else
		oplock = 0;

	if (tcon->unix_ext && cap_unix(tcon->ses) &&
	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
				le64_to_cpu(tcon->fsUnixInfo.Capability))) {
		/*
		 * O_CREAT, O_EXCL and O_TRUNC already had their effect on the
		 * original open. Must mask them off for a reopen.
		 */
		unsigned int oflags = cfile->f_flags &
						~(O_CREAT | O_EXCL | O_TRUNC);

		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
				     cifs_sb->mnt_file_mode /* ignored */,
				     oflags, &oplock, &cfile->fid.netfid, xid);
		if (rc == 0) {
			cifs_dbg(FYI, "posix reopen succeeded\n");
			oparms.reconnect = true;
			goto reopen_success;
		}
		/*
		 * fallthrough to retry open the old way on errors, especially
		 * in the reconnect path it is important to retry hard
		 */
	}

	desired_access = cifs_convert_flags(cfile->f_flags);

	if (backup_cred(cifs_sb))
		create_options |= CREATE_OPEN_BACKUP_INTENT;

	/* O_SYNC also has bit for O_DSYNC so following check picks up either */
	if (cfile->f_flags & O_SYNC)
		create_options |= CREATE_WRITE_THROUGH;

	if (cfile->f_flags & O_DIRECT)
		create_options |= CREATE_NO_BUFFER;

	if (server->ops->get_lease_key)
		server->ops->get_lease_key(inode, &cfile->fid);

	oparms.tcon = tcon;
	oparms.cifs_sb = cifs_sb;
	oparms.desired_access = desired_access;
	oparms.create_options = create_options;
	oparms.disposition = disposition;
	oparms.path = full_path;
	oparms.fid = &cfile->fid;
	oparms.reconnect = true;

	/*
	 * Can not refresh inode by passing in file_info buf to be returned by
	 * ops->open and then calling get_inode_info with returned buf since
	 * file might have write behind data that needs to be flushed and server
	 * version of file size can be stale. If we knew for sure that inode was
	 * not dirty locally we could do this.
	 */
	rc = server->ops->open(xid, &oparms, &oplock, NULL);
	if (rc == -ENOENT && oparms.reconnect == false) {
		/* durable handle timeout is expired - open the file again */
		rc = server->ops->open(xid, &oparms, &oplock, NULL);
		/* indicate that we need to relock the file */
		oparms.reconnect = true;
	}

	if (rc) {
		mutex_unlock(&cfile->fh_mutex);
		cifs_dbg(FYI, "cifs_reopen returned 0x%x\n", rc);
		cifs_dbg(FYI, "oplock: %d\n", oplock);
		goto reopen_error_exit;
	}

reopen_success:
	cfile->invalidHandle = false;
	mutex_unlock(&cfile->fh_mutex);
	cinode = CIFS_I(inode);

	if (can_flush) {
		rc = filemap_write_and_wait(inode->i_mapping);
		mapping_set_error(inode->i_mapping, rc);

		if (tcon->unix_ext)
			rc = cifs_get_inode_info_unix(&inode, full_path,
						      inode->i_sb, xid);
		else
			rc = cifs_get_inode_info(&inode, full_path, NULL,
						 inode->i_sb, xid, NULL);
	}
	/*
	 * Else we are writing out data to server already and could deadlock if
	 * we tried to flush data, and since we do not know if we have data that
	 * would invalidate the current end of file on the server we can not go
	 * to the server to get the new inode info.
	 */

	/*
	 * If the server returned a read oplock and we have mandatory brlocks,
	 * set oplock level to None.
	 */
	if (server->ops->is_read_op(oplock) && cifs_has_mand_locks(cinode)) {
		cifs_dbg(FYI, "Reset oplock val from read to None due to mand locks\n");
		oplock = 0;
	}

	server->ops->set_fid(cfile, &cfile->fid, oplock);
	if (oparms.reconnect)
		cifs_relock_file(cfile);

reopen_error_exit:
	kfree(full_path);
	free_xid(xid);
	return rc;
}
int cifs_close(struct inode *inode, struct file *file)
{
	if (file->private_data != NULL) {
		cifsFileInfo_put(file->private_data);
		file->private_data = NULL;
	}

	/* return code from the ->release op is always ignored */
	return 0;
}
void
cifs_reopen_persistent_handles(struct cifs_tcon *tcon)
{
	struct cifsFileInfo *open_file;
	struct list_head *tmp;
	struct list_head *tmp1;
	struct list_head tmp_list;

	if (!tcon->use_persistent || !tcon->need_reopen_files)
		return;

	tcon->need_reopen_files = false;

	cifs_dbg(FYI, "Reopen persistent handles");
	INIT_LIST_HEAD(&tmp_list);

	/* list all files open on tree connection, reopen resilient handles */
	spin_lock(&tcon->open_file_lock);
	list_for_each(tmp, &tcon->openFileList) {
		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
		if (!open_file->invalidHandle)
			continue;
		cifsFileInfo_get(open_file);
		list_add_tail(&open_file->rlist, &tmp_list);
	}
	spin_unlock(&tcon->open_file_lock);

	list_for_each_safe(tmp, tmp1, &tmp_list) {
		open_file = list_entry(tmp, struct cifsFileInfo, rlist);
		if (cifs_reopen_file(open_file, false /* do not flush */))
			tcon->need_reopen_files = true;
		list_del_init(&open_file->rlist);
		cifsFileInfo_put(open_file);
	}
}
int cifs_closedir(struct inode *inode, struct file *file)
{
	int rc = 0;
	unsigned int xid;
	struct cifsFileInfo *cfile = file->private_data;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	char *buf;

	cifs_dbg(FYI, "Closedir inode = 0x%p\n", inode);

	if (cfile == NULL)
		return rc;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);
	server = tcon->ses->server;

	cifs_dbg(FYI, "Freeing private data in close dir\n");
	spin_lock(&cfile->file_info_lock);
	if (server->ops->dir_needs_close(cfile)) {
		cfile->invalidHandle = true;
		spin_unlock(&cfile->file_info_lock);
		if (server->ops->close_dir)
			rc = server->ops->close_dir(xid, tcon, &cfile->fid);
		else
			rc = -ENOSYS;
		cifs_dbg(FYI, "Closing uncompleted readdir with rc %d\n", rc);
		/* not much we can do if it fails anyway, ignore rc */
		rc = 0;
	} else
		spin_unlock(&cfile->file_info_lock);

	buf = cfile->srch_inf.ntwrk_buf_start;
	if (buf) {
		cifs_dbg(FYI, "closedir free smb buf in srch struct\n");
		cfile->srch_inf.ntwrk_buf_start = NULL;
		if (cfile->srch_inf.smallBuf)
			cifs_small_buf_release(buf);
		else
			cifs_buf_release(buf);
	}

	cifs_put_tlink(cfile->tlink);
	kfree(file->private_data);
	file->private_data = NULL;
	/* BB can we lock the filestruct while this is going on? */
	free_xid(xid);
	return rc;
}
static struct cifsLockInfo *
cifs_lock_init(__u64 offset, __u64 length, __u8 type)
{
	struct cifsLockInfo *lock =
		kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
	if (!lock)
		return lock;
	lock->offset = offset;
	lock->length = length;
	lock->type = type;
	lock->pid = current->tgid;
	INIT_LIST_HEAD(&lock->blist);
	init_waitqueue_head(&lock->block_q);
	return lock;
}
void
cifs_del_lock_waiters(struct cifsLockInfo *lock)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, &lock->blist, blist) {
		list_del_init(&li->blist);
		wake_up(&li->block_q);
	}
}
#define CIFS_LOCK_OP	0
#define CIFS_READ_OP	1
#define CIFS_WRITE_OP	2

/* @rw_check : 0 - no op, 1 - read, 2 - write */
static bool
cifs_find_fid_lock_conflict(struct cifs_fid_locks *fdlocks, __u64 offset,
			    __u64 length, __u8 type, struct cifsFileInfo *cfile,
			    struct cifsLockInfo **conf_lock, int rw_check)
{
	struct cifsLockInfo *li;
	struct cifsFileInfo *cur_cfile = fdlocks->cfile;
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;

	list_for_each_entry(li, &fdlocks->locks, llist) {
		if (offset + length <= li->offset ||
		    offset >= li->offset + li->length)
			continue;
		if (rw_check != CIFS_LOCK_OP && current->tgid == li->pid &&
		    server->ops->compare_fids(cfile, cur_cfile)) {
			/* shared lock prevents write op through the same fid */
			if (!(li->type & server->vals->shared_lock_type) ||
			    rw_check != CIFS_WRITE_OP)
				continue;
		}
		if ((type & server->vals->shared_lock_type) &&
		    ((server->ops->compare_fids(cfile, cur_cfile) &&
		     current->tgid == li->pid) || type == li->type))
			continue;
		if (conf_lock)
			*conf_lock = li;
		return true;
	}
	return false;
}
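/*
 * Illustrative example (added commentary): with rw_check == CIFS_WRITE_OP,
 * a cached shared (read) lock taken by this same process through the same
 * fid still conflicts with a write, while that same lock does not conflict
 * when rw_check is CIFS_READ_OP or CIFS_LOCK_OP. Exclusive locks held by
 * another process conflict in every mode.
 */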
bool
cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
			__u8 type, struct cifsLockInfo **conf_lock,
			int rw_check)
{
	bool rc = false;
	struct cifs_fid_locks *cur;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));

	list_for_each_entry(cur, &cinode->llist, llist) {
		rc = cifs_find_fid_lock_conflict(cur, offset, length, type,
						 cfile, conf_lock, rw_check);
		if (rc)
			break;
	}

	return rc;
}
/*
 * Check if there is another lock that prevents us from setting the lock
 * (mandatory style). If such a lock exists, update the flock structure
 * with its properties. Otherwise, set the flock type to F_UNLCK if we
 * can cache brlocks, or leave it the same if we can't. Returns 0 if we
 * don't need to request to the server or 1 otherwise.
 */
static int
cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
	       __u8 type, struct file_lock *flock)
{
	int rc = 0;
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
	bool exist;

	down_read(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, offset, length, type,
					&conf_lock, CIFS_LOCK_OP);
	if (exist) {
		flock->fl_start = conf_lock->offset;
		flock->fl_end = conf_lock->offset + conf_lock->length - 1;
		flock->fl_pid = conf_lock->pid;
		if (conf_lock->type & server->vals->shared_lock_type)
			flock->fl_type = F_RDLCK;
		else
			flock->fl_type = F_WRLCK;
	} else if (!cinode->can_cache_brlcks)
		rc = 1;
	else
		flock->fl_type = F_UNLCK;

	up_read(&cinode->lock_sem);
	return rc;
}
static void
cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
{
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	cifs_down_write(&cinode->lock_sem);
	list_add_tail(&lock->llist, &cfile->llist->locks);
	up_write(&cinode->lock_sem);
}
/*
 * Set the byte-range lock (mandatory style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if no locks prevent us but we need to request to the server;
 * 3) -EACCES, if there is a lock that prevents us and wait is false.
 */
static int
cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
		 bool wait)
{
	struct cifsLockInfo *conf_lock;
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	bool exist;
	int rc = 0;

try_again:
	exist = false;
	cifs_down_write(&cinode->lock_sem);

	exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
					lock->type, &conf_lock, CIFS_LOCK_OP);
	if (!exist && cinode->can_cache_brlcks) {
		list_add_tail(&lock->llist, &cfile->llist->locks);
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (!exist)
		rc = 1;
	else if (!wait)
		rc = -EACCES;
	else {
		list_add_tail(&lock->blist, &conf_lock->blist);
		up_write(&cinode->lock_sem);
		rc = wait_event_interruptible(lock->block_q,
					(lock->blist.prev == &lock->blist) &&
					(lock->blist.next == &lock->blist));
		if (!rc)
			goto try_again;
		cifs_down_write(&cinode->lock_sem);
		list_del_init(&lock->blist);
	}

	up_write(&cinode->lock_sem);
	return rc;
}
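/*
 * Illustrative note (added commentary, not from the original source):
 * a caller typically dispatches on the tri-state result, e.g.
 *
 *	rc = cifs_lock_add_if(cfile, lock, wait_flag);
 *	if (rc == 1) {		// no local conflict, must ask the server
 *		rc = server->ops->mand_lock(...);
 *		...
 *	}			// 0: cached locally; <0: conflict/error
 */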
/*
 * Check if there is another lock that prevents us from setting the lock
 * (posix style). If such a lock exists, update the flock structure with
 * its properties. Otherwise, set the flock type to F_UNLCK if we can
 * cache brlocks, or leave it the same if we can't. Returns 0 if we don't
 * need to request to the server or 1 otherwise.
 */
static int
cifs_posix_lock_test(struct file *file, struct file_lock *flock)
{
	int rc = 0;
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	unsigned char saved_type = flock->fl_type;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return 1;

	down_read(&cinode->lock_sem);
	posix_test_lock(file, flock);

	if (flock->fl_type == F_UNLCK && !cinode->can_cache_brlcks) {
		flock->fl_type = saved_type;
		rc = 1;
	}

	up_read(&cinode->lock_sem);
	return rc;
}
/*
 * Set the byte-range lock (posix style). Returns:
 * 1) 0, if we set the lock and don't need to request to the server;
 * 2) 1, if we need to request to the server;
 * 3) <0, if the error occurs while setting the lock.
 */
static int
cifs_posix_lock_set(struct file *file, struct file_lock *flock)
{
	struct cifsInodeInfo *cinode = CIFS_I(file_inode(file));
	int rc = 1;

	if ((flock->fl_flags & FL_POSIX) == 0)
		return rc;

try_again:
	cifs_down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

	rc = posix_lock_file(file, flock, NULL);
	up_write(&cinode->lock_sem);
	if (rc == FILE_LOCK_DEFERRED) {
		rc = wait_event_interruptible(flock->fl_wait, !flock->fl_next);
		if (!rc)
			goto try_again;
		posix_unblock_lock(flock);
	}
	return rc;
}
static int
cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
{
	unsigned int xid;
	int rc = 0, stored_rc;
	struct cifsLockInfo *li, *tmp;
	struct cifs_tcon *tcon;
	unsigned int num, max_num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	int i;

	xid = get_xid();
	tcon = tlink_tcon(cfile->tlink);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE))) {
		free_xid(xid);
		return -EINVAL;
	}

	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
		     PAGE_SIZE);
	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
			PAGE_SIZE);
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf) {
		free_xid(xid);
		return -ENOMEM;
	}

	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (li->type != types[i])
				continue;
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       (__u8)li->type, 0, num,
						       buf);
				if (stored_rc)
					rc = stored_rc;
				cur = buf;
				num = 0;
			} else
				cur++;
		}

		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       (__u8)types[i], 0, num, buf);
			if (stored_rc)
				rc = stored_rc;
		}
	}

	kfree(buf);
	free_xid(xid);
	return rc;
}
static __u32
hash_lockowner(fl_owner_t owner)
{
	return cifs_lock_secret ^ hash32_ptr((const void *)owner);
}

struct lock_to_push {
	struct list_head llist;
	__u64 offset;
	__u64 length;
	__u32 pid;
	__u16 netfid;
	__u8 type;
};
static int
cifs_push_posix_locks(struct cifsFileInfo *cfile)
{
	struct inode *inode = d_inode(cfile->dentry);
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct file_lock *flock;
	struct file_lock_context *flctx = inode->i_flctx;
	unsigned int count = 0, i;
	int rc = 0, xid, type;
	struct list_head locks_to_send, *el;
	struct lock_to_push *lck, *tmp;
	__u64 length;

	xid = get_xid();

	if (!flctx)
		goto out;

	spin_lock(&flctx->flc_lock);
	list_for_each(el, &flctx->flc_posix) {
		count++;
	}
	spin_unlock(&flctx->flc_lock);

	INIT_LIST_HEAD(&locks_to_send);

	/*
	 * Allocating count locks is enough because no FL_POSIX locks can be
	 * added to the list while we are holding cinode->lock_sem that
	 * protects locking operations of this inode.
	 */
	for (i = 0; i < count; i++) {
		lck = kmalloc(sizeof(struct lock_to_push), GFP_KERNEL);
		if (!lck) {
			rc = -ENOMEM;
			goto err_out;
		}
		list_add_tail(&lck->llist, &locks_to_send);
	}

	el = locks_to_send.next;
	spin_lock(&flctx->flc_lock);
	list_for_each_entry(flock, &flctx->flc_posix, fl_list) {
		if (el == &locks_to_send) {
			/*
			 * The list ended. We don't have enough allocated
			 * structures - something is really wrong.
			 */
			cifs_dbg(VFS, "Can't push all brlocks!\n");
			break;
		}
		length = 1 + flock->fl_end - flock->fl_start;
		if (flock->fl_type == F_RDLCK || flock->fl_type == F_SHLCK)
			type = CIFS_RDLCK;
		else
			type = CIFS_WRLCK;
		lck = list_entry(el, struct lock_to_push, llist);
		lck->pid = hash_lockowner(flock->fl_owner);
		lck->netfid = cfile->fid.netfid;
		lck->length = length;
		lck->type = type;
		lck->offset = flock->fl_start;
		el = el->next;
	}
	spin_unlock(&flctx->flc_lock);

	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		int stored_rc;

		stored_rc = CIFSSMBPosixLock(xid, tcon, lck->netfid, lck->pid,
					     lck->offset, lck->length, NULL,
					     lck->type, 0);
		if (stored_rc)
			rc = stored_rc;
		list_del(&lck->llist);
		kfree(lck);
	}

out:
	free_xid(xid);
	return rc;
err_out:
	list_for_each_entry_safe(lck, tmp, &locks_to_send, llist) {
		list_del(&lck->llist);
		kfree(lck);
	}
	goto out;
}
static int
cifs_push_locks(struct cifsFileInfo *cfile)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	int rc = 0;

	/* we are going to update can_cache_brlcks here - need a write access */
	cifs_down_write(&cinode->lock_sem);
	if (!cinode->can_cache_brlcks) {
		up_write(&cinode->lock_sem);
		return rc;
	}

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		rc = cifs_push_posix_locks(cfile);
	else
		rc = tcon->ses->server->ops->push_mand_locks(cfile);

	cinode->can_cache_brlcks = false;
	up_write(&cinode->lock_sem);
	return rc;
}
static void
cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
		bool *wait_flag, struct TCP_Server_Info *server)
{
	if (flock->fl_flags & FL_POSIX)
		cifs_dbg(FYI, "Posix\n");
	if (flock->fl_flags & FL_FLOCK)
		cifs_dbg(FYI, "Flock\n");
	if (flock->fl_flags & FL_SLEEP) {
		cifs_dbg(FYI, "Blocking lock\n");
		*wait_flag = true;
	}
	if (flock->fl_flags & FL_ACCESS)
		cifs_dbg(FYI, "Process suspended by mandatory locking - not implemented yet\n");
	if (flock->fl_flags & FL_LEASE)
		cifs_dbg(FYI, "Lease on file - not implemented yet\n");
	if (flock->fl_flags &
	    (~(FL_POSIX | FL_FLOCK | FL_SLEEP |
	       FL_ACCESS | FL_LEASE | FL_CLOSE)))
		cifs_dbg(FYI, "Unknown lock flags 0x%x\n", flock->fl_flags);

	*type = server->vals->large_lock_type;
	if (flock->fl_type == F_WRLCK) {
		cifs_dbg(FYI, "F_WRLCK\n");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_UNLCK) {
		cifs_dbg(FYI, "F_UNLCK\n");
		*type |= server->vals->unlock_lock_type;
		*unlock = 1;
		/* Check if unlock includes more than one lock range */
	} else if (flock->fl_type == F_RDLCK) {
		cifs_dbg(FYI, "F_RDLCK\n");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_EXLCK) {
		cifs_dbg(FYI, "F_EXLCK\n");
		*type |= server->vals->exclusive_lock_type;
		*lock = 1;
	} else if (flock->fl_type == F_SHLCK) {
		cifs_dbg(FYI, "F_SHLCK\n");
		*type |= server->vals->shared_lock_type;
		*lock = 1;
	} else
		cifs_dbg(FYI, "Unknown type of lock\n");
}
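/*
 * Illustrative summary (added commentary) of how the VFS lock types map
 * to the server's lock-type bits and the lock/unlock intent:
 *
 *	F_WRLCK, F_EXLCK  ->  exclusive_lock_type, *lock = 1
 *	F_RDLCK, F_SHLCK  ->  shared_lock_type,    *lock = 1
 *	F_UNLCK           ->  unlock_lock_type,    *unlock = 1
 *
 * with large_lock_type always set for 64-bit ranges.
 */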
static int
cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	__u16 netfid = cfile->fid.netfid;

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_test(file, flock);
		if (!rc)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;
		rc = CIFSSMBPosixLock(xid, tcon, netfid,
				      hash_lockowner(flock->fl_owner),
				      flock->fl_start, length, flock,
				      posix_lock_type, wait_flag);
		return rc;
	}

	rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
	if (!rc)
		return rc;

	/* BB we could chain these into one lock request BB */
	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length, type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 0, 1, false);
		flock->fl_type = F_UNLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
		return 0;
	}

	if (type & server->vals->shared_lock_type) {
		flock->fl_type = F_WRLCK;
		return 0;
	}

	type &= ~server->vals->exclusive_lock_type;

	rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
				    type | server->vals->shared_lock_type,
				    1, 0, false);
	if (rc == 0) {
		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
			type | server->vals->shared_lock_type, 0, 1, false);
		flock->fl_type = F_RDLCK;
		if (rc != 0)
			cifs_dbg(VFS, "Error unlocking previously locked range %d during test of lock\n",
				 rc);
	} else
		flock->fl_type = F_WRLCK;

	return 0;
}
void
cifs_move_llist(struct list_head *source, struct list_head *dest)
{
	struct list_head *li, *tmp;
	list_for_each_safe(li, tmp, source)
		list_move(li, dest);
}

void
cifs_free_llist(struct list_head *llist)
{
	struct cifsLockInfo *li, *tmp;
	list_for_each_entry_safe(li, tmp, llist, llist) {
		cifs_del_lock_waiters(li);
		list_del(&li->llist);
		kfree(li);
	}
}
int
cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock,
		  unsigned int xid)
{
	int rc = 0, stored_rc;
	int types[] = {LOCKING_ANDX_LARGE_FILES,
		       LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
	unsigned int i;
	unsigned int max_num, num, max_buf;
	LOCKING_ANDX_RANGE *buf, *cur;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct cifsInodeInfo *cinode = CIFS_I(d_inode(cfile->dentry));
	struct cifsLockInfo *li, *tmp;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct list_head tmp_llist;

	INIT_LIST_HEAD(&tmp_llist);

	/*
	 * Accessing maxBuf is racy with cifs_reconnect - need to store value
	 * and check it before using.
	 */
	max_buf = tcon->ses->server->maxBuf;
	if (max_buf < (sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE)))
		return -EINVAL;

	BUILD_BUG_ON(sizeof(struct smb_hdr) + sizeof(LOCKING_ANDX_RANGE) >
		     PAGE_SIZE);
	max_buf = min_t(unsigned int, max_buf - sizeof(struct smb_hdr),
			PAGE_SIZE);
	max_num = (max_buf - sizeof(struct smb_hdr)) /
						sizeof(LOCKING_ANDX_RANGE);
	buf = kcalloc(max_num, sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	cifs_down_write(&cinode->lock_sem);
	for (i = 0; i < 2; i++) {
		cur = buf;
		num = 0;
		list_for_each_entry_safe(li, tmp, &cfile->llist->locks, llist) {
			if (flock->fl_start > li->offset ||
			    (flock->fl_start + length) <
			    (li->offset + li->length))
				continue;
			if (current->tgid != li->pid)
				continue;
			if (types[i] != li->type)
				continue;
			if (cinode->can_cache_brlcks) {
				/*
				 * We can cache brlock requests - simply remove
				 * a lock from the file's list.
				 */
				list_del(&li->llist);
				cifs_del_lock_waiters(li);
				kfree(li);
				continue;
			}
			cur->Pid = cpu_to_le16(li->pid);
			cur->LengthLow = cpu_to_le32((u32)li->length);
			cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
			cur->OffsetLow = cpu_to_le32((u32)li->offset);
			cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
			/*
			 * We need to save a lock here to let us add it again to
			 * the file's list if the unlock range request fails on
			 * the server.
			 */
			list_move(&li->llist, &tmp_llist);
			if (++num == max_num) {
				stored_rc = cifs_lockv(xid, tcon,
						       cfile->fid.netfid,
						       li->type, num, 0, buf);
				if (stored_rc) {
					/*
					 * We failed on the unlock range
					 * request - add all locks from the tmp
					 * list to the head of the file's list.
					 */
					cifs_move_llist(&tmp_llist,
							&cfile->llist->locks);
					rc = stored_rc;
				} else
					/*
					 * The unlock range request succeeded -
					 * free the tmp list.
					 */
					cifs_free_llist(&tmp_llist);
				cur = buf;
				num = 0;
			} else
				cur++;
		}
		if (num) {
			stored_rc = cifs_lockv(xid, tcon, cfile->fid.netfid,
					       types[i], num, 0, buf);
			if (stored_rc) {
				cifs_move_llist(&tmp_llist,
						&cfile->llist->locks);
				rc = stored_rc;
			} else
				cifs_free_llist(&tmp_llist);
		}
	}

	up_write(&cinode->lock_sem);
	kfree(buf);
	return rc;
}
static int
cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
	   bool wait_flag, bool posix_lck, int lock, int unlock,
	   unsigned int xid)
{
	int rc = 0;
	__u64 length = 1 + flock->fl_end - flock->fl_start;
	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
	struct TCP_Server_Info *server = tcon->ses->server;
	struct inode *inode = d_inode(cfile->dentry);

	if (posix_lck) {
		int posix_lock_type;

		rc = cifs_posix_lock_set(file, flock);
		if (!rc || rc < 0)
			return rc;

		if (type & server->vals->shared_lock_type)
			posix_lock_type = CIFS_RDLCK;
		else
			posix_lock_type = CIFS_WRLCK;

		if (unlock == 1)
			posix_lock_type = CIFS_UNLCK;

		rc = CIFSSMBPosixLock(xid, tcon, cfile->fid.netfid,
				      hash_lockowner(flock->fl_owner),
				      flock->fl_start, length,
				      NULL, posix_lock_type, wait_flag);
		goto out;
	}

	if (lock) {
		struct cifsLockInfo *lock;

		lock = cifs_lock_init(flock->fl_start, length, type);
		if (!lock)
			return -ENOMEM;

		rc = cifs_lock_add_if(cfile, lock, wait_flag);
		if (rc < 0) {
			kfree(lock);
			return rc;
		}
		if (!rc)
			goto out;

		/*
		 * Windows 7 server can delay breaking lease from read to None
		 * if we set a byte-range lock on a file - break it explicitly
		 * before sending the lock to the server to be sure the next
		 * read won't conflict with non-overlapping locks due to
		 * page reading.
		 */
		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
					CIFS_CACHE_READ(CIFS_I(inode))) {
			cifs_zap_mapping(inode);
			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
				 inode);
			CIFS_I(inode)->oplock = 0;
		}

		rc = server->ops->mand_lock(xid, cfile, flock->fl_start, length,
					    type, 1, 0, wait_flag);
		if (rc) {
			kfree(lock);
			return rc;
		}

		cifs_lock_add(cfile, lock);
	} else if (unlock)
		rc = server->ops->mand_unlock_range(cfile, flock, xid);

out:
	if (flock->fl_flags & FL_POSIX) {
		/*
		 * If this is a request to remove all locks because we
		 * are closing the file, it doesn't matter if the
		 * unlocking failed as both cifs.ko and the SMB server
		 * remove the lock on file close
		 */
		if (rc) {
			cifs_dbg(VFS, "%s failed rc=%d\n", __func__, rc);
			if (!(flock->fl_flags & FL_CLOSE))
				return rc;
		}
		rc = locks_lock_file_wait(file, flock);
	}
	return rc;
}
int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
{
	int rc, xid;
	int lock = 0, unlock = 0;
	bool wait_flag = false;
	bool posix_lck = false;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct cifsInodeInfo *cinode;
	struct cifsFileInfo *cfile;
	__u16 netfid;
	__u32 type;

	rc = -EACCES;
	xid = get_xid();

	cifs_dbg(FYI, "Lock parm: 0x%x flockflags: 0x%x flocktype: 0x%x start: %lld end: %lld\n",
		 cmd, flock->fl_flags, flock->fl_type,
		 flock->fl_start, flock->fl_end);

	cfile = (struct cifsFileInfo *)file->private_data;
	tcon = tlink_tcon(cfile->tlink);

	cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
			tcon->ses->server);
	cifs_sb = CIFS_FILE_SB(file);
	netfid = cfile->fid.netfid;
	cinode = CIFS_I(file_inode(file));

	if (cap_unix(tcon->ses) &&
	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
		posix_lck = true;
	/*
	 * BB add code here to normalize offset and length to account for
	 * negative length which we can not accept over the wire.
	 */
	if (IS_GETLK(cmd)) {
		rc = cifs_getlk(file, flock, type, wait_flag, posix_lck, xid);
		free_xid(xid);
		return rc;
	}

	if (!lock && !unlock) {
		/*
		 * if no lock or unlock then nothing to do since we do not
		 * know what it is
		 */
		free_xid(xid);
		return -EOPNOTSUPP;
	}

	rc = cifs_setlk(file, flock, type, wait_flag, posix_lck, lock, unlock,
			xid);
	free_xid(xid);
	return rc;
}
/*
 * update the file size (if needed) after a write. Should be called with
 * the inode->i_lock held
 */
void
cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
		unsigned int bytes_written)
{
	loff_t end_of_write = offset + bytes_written;

	if (end_of_write > cifsi->server_eof)
		cifsi->server_eof = end_of_write;
}
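/*
 * Illustrative example (added commentary): a 4096-byte write at offset
 * 61440 yields end_of_write == 65536, so server_eof only moves forward,
 * e.g. from 65000 to 65536, and never shrinks here; truncation is
 * handled elsewhere.
 */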
ssize_t
cifs_write(struct cifsFileInfo *open_file, __u32 pid, const char *write_data,
	   size_t write_size, loff_t *offset)
{
	int rc = 0;
	unsigned int bytes_written = 0;
	unsigned int total_written;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	struct TCP_Server_Info *server;
	unsigned int xid;
	struct dentry *dentry = open_file->dentry;
	struct cifsInodeInfo *cifsi = CIFS_I(d_inode(dentry));
	struct cifs_io_parms io_parms;

	cifs_sb = CIFS_SB(dentry->d_sb);

	cifs_dbg(FYI, "write %zd bytes to offset %lld of %pd\n",
		 write_size, *offset, dentry);

	tcon = tlink_tcon(open_file->tlink);
	server = tcon->ses->server;

	if (!server->ops->sync_write)
		return -ENOSYS;

	xid = get_xid();

	for (total_written = 0; write_size > total_written;
	     total_written += bytes_written) {
		rc = -EAGAIN;
		while (rc == -EAGAIN) {
			struct kvec iov[2];
			unsigned int len;

			if (open_file->invalidHandle) {
				/* we could deadlock if we called
				   filemap_fdatawait from here so tell
				   reopen_file not to flush data to
				   server now */
				rc = cifs_reopen_file(open_file, false);
				if (rc != 0)
					break;
			}

			len = min(server->ops->wp_retry_size(d_inode(dentry)),
				  (unsigned int)write_size - total_written);
			/* iov[0] is reserved for smb header */
			iov[1].iov_base = (char *)write_data + total_written;
			iov[1].iov_len = len;
			io_parms.pid = pid;
			io_parms.tcon = tcon;
			io_parms.offset = *offset;
			io_parms.length = len;
			rc = server->ops->sync_write(xid, &open_file->fid,
						     &io_parms, &bytes_written,
						     iov, 1);
		}
		if (rc || (bytes_written == 0)) {
			if (total_written)
				break;
			else {
				free_xid(xid);
				return rc;
			}
		} else {
			spin_lock(&d_inode(dentry)->i_lock);
			cifs_update_eof(cifsi, *offset, bytes_written);
			spin_unlock(&d_inode(dentry)->i_lock);
			*offset += bytes_written;
		}
	}

	cifs_stats_bytes_written(tcon, total_written);

	if (total_written > 0) {
		spin_lock(&d_inode(dentry)->i_lock);
		if (*offset > d_inode(dentry)->i_size)
			i_size_write(d_inode(dentry), *offset);
		spin_unlock(&d_inode(dentry)->i_lock);
	}
	mark_inode_dirty_sync(d_inode(dentry));
	free_xid(xid);
	return total_written;
}
struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file = NULL;
	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
	struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&tcon->open_file_lock);
	/* we could simply get the first_list_entry since write-only entries
	   are always at the end of the list but since the first entry might
	   have a close pending, we go through the whole list */
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
			if (!open_file->invalidHandle) {
				/* found a good file */
				/* lock it so it will not be closed on us */
				cifsFileInfo_get(open_file);
				spin_unlock(&tcon->open_file_lock);
				return open_file;
			} /* else might as well continue, and look for
			     another, or simply have the caller reopen it
			     again rather than trying to fix this handle */
		} else /* write only file */
			break; /* write only files are last so must be done */
	}
	spin_unlock(&tcon->open_file_lock);
	return NULL;
}
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
					bool fsuid_only)
{
	struct cifsFileInfo *open_file, *inv_file = NULL;
	struct cifs_sb_info *cifs_sb;
	struct cifs_tcon *tcon;
	bool any_available = false;
	int rc;
	unsigned int refind = 0;

	/* Having a null inode here (because mapping->host was set to zero by
	   the VFS or MM) should not happen but we had reports of an oops (due
	   to it being zero) during stress testcases so we need to check for it */

	if (cifs_inode == NULL) {
		cifs_dbg(VFS, "Null inode passed to cifs_writeable_file\n");
		dump_stack();
		return NULL;
	}

	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
	tcon = cifs_sb_master_tcon(cifs_sb);

	/* only filter by fsuid on multiuser mounts */
	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
		fsuid_only = false;

	spin_lock(&tcon->open_file_lock);
refind_writable:
	if (refind > MAX_REOPEN_ATT) {
		spin_unlock(&tcon->open_file_lock);
		return NULL;
	}
	list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
		if (!any_available && open_file->pid != current->tgid)
			continue;
		if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
			continue;
		if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
			if (!open_file->invalidHandle) {
				/* found a good writable file */
				cifsFileInfo_get(open_file);
				spin_unlock(&tcon->open_file_lock);
				return open_file;
			} else {
				if (!inv_file)
					inv_file = open_file;
			}
		}
	}
	/* couldn't find useable FH with same pid, try any available */
	if (!any_available) {
		any_available = true;
		goto refind_writable;
	}

	if (inv_file) {
		any_available = false;
		cifsFileInfo_get(inv_file);
	}

	spin_unlock(&tcon->open_file_lock);

	if (inv_file) {
		rc = cifs_reopen_file(inv_file, false);
		if (!rc)
			return inv_file;
		else {
			spin_lock(&tcon->open_file_lock);
			list_move_tail(&inv_file->flist,
				       &cifs_inode->openFileList);
			spin_unlock(&tcon->open_file_lock);
			cifsFileInfo_put(inv_file);
			++refind;
			inv_file = NULL;
			spin_lock(&tcon->open_file_lock);
			goto refind_writable;
		}
	}

	return NULL;
}
static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
{
	struct address_space *mapping = page->mapping;
	loff_t offset = (loff_t)page->index << PAGE_SHIFT;
	char *write_data;
	int rc = -EFAULT;
	int bytes_written = 0;
	struct inode *inode;
	struct cifsFileInfo *open_file;

	if (!mapping || !mapping->host)
		return -EFAULT;

	inode = page->mapping->host;

	offset += (loff_t)from;
	write_data = kmap(page);
	write_data += from;

	if ((to > PAGE_SIZE) || (from > to)) {
		kunmap(page);
		return -EIO;
	}

	/* racing with truncate? */
	if (offset > mapping->host->i_size) {
		kunmap(page);
		return 0; /* don't care */
	}

	/* check to make sure that we are not extending the file */
	if (mapping->host->i_size - offset < (loff_t)to)
		to = (unsigned)(mapping->host->i_size - offset);

	open_file = find_writable_file(CIFS_I(mapping->host), false);
	if (open_file) {
		bytes_written = cifs_write(open_file, open_file->pid,
					   write_data, to - from, &offset);
		cifsFileInfo_put(open_file);
		/* Does mm or vfs already set times? */
		inode->i_atime = inode->i_mtime = current_time(inode);
		if ((bytes_written > 0) && (offset))
			rc = 0;
		else if (bytes_written < 0)
			rc = bytes_written;
	} else {
		cifs_dbg(FYI, "No writeable filehandles for inode\n");
		rc = -EIO;
	}

	kunmap(page);
	return rc;
}
static struct cifs_writedata *
wdata_alloc_and_fillpages(pgoff_t tofind, struct address_space *mapping,
			  pgoff_t end, pgoff_t *index,
			  unsigned int *found_pages)
{
	unsigned int nr_pages;
	struct page **pages;
	struct cifs_writedata *wdata;

	wdata = cifs_writedata_alloc((unsigned int)tofind,
				     cifs_writev_complete);
	if (!wdata)
		return NULL;

	/*
	 * find_get_pages_tag seems to return a max of 256 on each
	 * iteration, so we must call it several times in order to
	 * fill the array or the wsize is effectively limited to
	 * 256 * PAGE_SIZE.
	 */
	*found_pages = 0;
	pages = wdata->pages;
	do {
		nr_pages = find_get_pages_tag(mapping, index,
					      PAGECACHE_TAG_DIRTY, tofind,
					      pages);
		*found_pages += nr_pages;
		tofind -= nr_pages;
		pages += nr_pages;
	} while (nr_pages && tofind && *index <= end);

	return wdata;
}
static unsigned int
wdata_prepare_pages(struct cifs_writedata *wdata, unsigned int found_pages,
		    struct address_space *mapping,
		    struct writeback_control *wbc,
		    pgoff_t end, pgoff_t *index, pgoff_t *next, bool *done)
{
	unsigned int nr_pages = 0, i;
	struct page *page;

	for (i = 0; i < found_pages; i++) {
		page = wdata->pages[i];
		/*
		 * At this point we hold neither mapping->tree_lock nor
		 * lock on the page itself: the page may be truncated or
		 * invalidated (changing page->mapping to NULL), or even
		 * swizzled back from swapper_space to tmpfs file
		 * mapping
		 */

		if (nr_pages == 0)
			lock_page(page);
		else if (!trylock_page(page))
			break;

		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			break;
		}

		if (!wbc->range_cyclic && page->index > end) {
			*done = true;
			unlock_page(page);
			break;
		}

		if (*next && (page->index != *next)) {
			/* Not next consecutive page */
			unlock_page(page);
			break;
		}

		if (wbc->sync_mode != WB_SYNC_NONE)
			wait_on_page_writeback(page);

		if (PageWriteback(page) ||
				!clear_page_dirty_for_io(page)) {
			unlock_page(page);
			break;
		}

		/*
		 * This actually clears the dirty bit in the radix tree.
		 * See cifs_writepage() for more commentary.
		 */
		set_page_writeback(page);
		if (page_offset(page) >= i_size_read(mapping->host)) {
			*done = true;
			unlock_page(page);
			end_page_writeback(page);
			break;
		}

		wdata->pages[i] = page;
		*next = page->index + 1;
		++nr_pages;
	}

	/* reset index to refind any pages skipped */
	if (nr_pages == 0)
		*index = wdata->pages[0]->index + 1;

	/* put any pages we aren't going to use */
	for (i = nr_pages; i < found_pages; i++) {
		put_page(wdata->pages[i]);
		wdata->pages[i] = NULL;
	}

	return nr_pages;
}
static int
wdata_send_pages(struct cifs_writedata *wdata, unsigned int nr_pages,
		 struct address_space *mapping, struct writeback_control *wbc)
{
	int rc = 0;
	struct TCP_Server_Info *server;
	unsigned int i;

	wdata->sync_mode = wbc->sync_mode;
	wdata->nr_pages = nr_pages;
	wdata->offset = page_offset(wdata->pages[0]);
	wdata->pagesz = PAGE_SIZE;
	wdata->tailsz = min(i_size_read(mapping->host) -
			page_offset(wdata->pages[nr_pages - 1]),
			(loff_t)PAGE_SIZE);
	wdata->bytes = ((nr_pages - 1) * PAGE_SIZE) + wdata->tailsz;

	if (wdata->cfile != NULL)
		cifsFileInfo_put(wdata->cfile);
	wdata->cfile = find_writable_file(CIFS_I(mapping->host), false);
	if (!wdata->cfile) {
		cifs_dbg(VFS, "No writable handles for inode\n");
		rc = -EBADF;
	} else {
		wdata->pid = wdata->cfile->pid;
		server = tlink_tcon(wdata->cfile->tlink)->ses->server;
		rc = server->ops->async_writev(wdata, cifs_writedata_release);
	}

	for (i = 0; i < nr_pages; ++i)
		unlock_page(wdata->pages[i]);

	return rc;
}
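/*
 * Worked example (added commentary): with 4K pages, an i_size of 18000
 * and three pages starting at offset 8192, the tail page begins at
 * 16384, so tailsz = min(18000 - 16384, 4096) = 1616 and
 * bytes = 2 * 4096 + 1616 = 9808 - only valid file data is sent.
 */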
static int cifs_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb);
	struct TCP_Server_Info *server;
	bool done = false, scanned = false, range_whole = false;
	pgoff_t end, index;
	struct cifs_writedata *wdata;
	int rc = 0;

	/*
	 * If wsize is smaller than the page cache size, default to writing
	 * one page at a time via cifs_writepage
	 */
	if (cifs_sb->wsize < PAGE_SIZE)
		return generic_writepages(mapping, wbc);

	if (wbc->range_cyclic) {
		index = mapping->writeback_index; /* Start from prev offset */
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = true;
		scanned = true;
	}
	server = cifs_sb_master_tcon(cifs_sb)->ses->server;
retry:
	while (!done && index <= end) {
		unsigned int i, nr_pages, found_pages, wsize, credits;
		pgoff_t next = 0, tofind, saved_index = index;

		rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
						   &wsize, &credits);
		if (rc)
			break;

		tofind = min((wsize / PAGE_SIZE) - 1, end - index) + 1;

		wdata = wdata_alloc_and_fillpages(tofind, mapping, end, &index,
						  &found_pages);
		if (!wdata) {
			rc = -ENOMEM;
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		if (found_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			add_credits_and_wake_if(server, credits, 0);
			break;
		}

		nr_pages = wdata_prepare_pages(wdata, found_pages, mapping, wbc,
					       end, &index, &next, &done);

		/* nothing to write? */
		if (nr_pages == 0) {
			kref_put(&wdata->refcount, cifs_writedata_release);
			add_credits_and_wake_if(server, credits, 0);
			continue;
		}

		wdata->credits = credits;

		rc = wdata_send_pages(wdata, nr_pages, mapping, wbc);

		/* send failure -- clean up the mess */
		if (rc != 0) {
			add_credits_and_wake_if(server, wdata->credits, 0);
			for (i = 0; i < nr_pages; ++i) {
				if (rc == -EAGAIN)
					redirty_page_for_writepage(wbc,
							   wdata->pages[i]);
				else
					SetPageError(wdata->pages[i]);
				end_page_writeback(wdata->pages[i]);
				put_page(wdata->pages[i]);
			}
			if (rc != -EAGAIN)
				mapping_set_error(mapping, rc);
		}
		kref_put(&wdata->refcount, cifs_writedata_release);

		if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) {
			index = saved_index;
			continue;
		}

		wbc->nr_to_write -= nr_pages;
		if (wbc->nr_to_write <= 0)
			done = true;

		index = next;
	}

	if (!scanned && !done) {
		/*
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		scanned = true;
		index = 0;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = index;

	return rc;
}
static int
cifs_writepage_locked(struct page *page, struct writeback_control *wbc)
{
	int rc;
	unsigned int xid;

	xid = get_xid();
	/* BB add check for wbc flags */
	get_page(page);
	if (!PageUptodate(page))
		cifs_dbg(FYI, "ppw - page not up to date\n");

	/*
	 * Set the "writeback" flag, and clear "dirty" in the radix tree.
	 *
	 * A writepage() implementation always needs to do either this,
	 * or re-dirty the page with "redirty_page_for_writepage()" in
	 * the case of a failure.
	 *
	 * Just unlocking the page will cause the radix tree tag-bits
	 * to fail to update with the state of the page correctly.
	 */
	set_page_writeback(page);
retry_write:
	rc = cifs_partialpagewrite(page, 0, PAGE_SIZE);
	if (rc == -EAGAIN && wbc->sync_mode == WB_SYNC_ALL)
		goto retry_write;
	else if (rc == -EAGAIN)
		redirty_page_for_writepage(wbc, page);
	else if (rc != 0)
		SetPageError(page);
	else
		SetPageUptodate(page);
	end_page_writeback(page);
	put_page(page);
	free_xid(xid);
	return rc;
}
static int cifs_writepage(struct page *page, struct writeback_control *wbc)
{
	int rc = cifs_writepage_locked(page, wbc);
	unlock_page(page);
	return rc;
}
2326 static int cifs_write_end(struct file *file, struct address_space *mapping,
2327 loff_t pos, unsigned len, unsigned copied,
2328 struct page *page, void *fsdata)
2331 struct inode *inode = mapping->host;
2332 struct cifsFileInfo *cfile = file->private_data;
2333 struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb);
2336 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2339 pid = current->tgid;
2341 cifs_dbg(FYI, "write_end for page %p from pos %lld with %d bytes\n",
2344 if (PageChecked(page)) {
2346 SetPageUptodate(page);
2347 ClearPageChecked(page);
2348 } else if (!PageUptodate(page) && copied == PAGE_SIZE)
2349 SetPageUptodate(page);
2351 if (!PageUptodate(page)) {
2353 unsigned offset = pos & (PAGE_SIZE - 1);
2357 /* this is probably better than directly calling
2358 partialpage_write since in this function the file handle is
2359 known which we might as well leverage */
2360 /* BB check if anything else missing out of ppw
2361 such as updating last write time */
2362 page_data = kmap(page);
2363 rc = cifs_write(cfile, pid, page_data + offset, copied, &pos);
2364 /* if (rc < 0) should we set writebehind rc? */
2371 set_page_dirty(page);
2375 spin_lock(&inode->i_lock);
2376 if (pos > inode->i_size)
2377 i_size_write(inode, pos);
2378 spin_unlock(&inode->i_lock);
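/*
 * Note: i_size_write() requires serialization from its callers (here
 * inode->i_lock); on 32-bit SMP kernels it bumps a seqcount so that
 * i_size_read() never sees a torn 64-bit file size.
 */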
2387 int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
2392 struct cifs_tcon *tcon;
2393 struct TCP_Server_Info *server;
2394 struct cifsFileInfo *smbfile = file->private_data;
2395 struct inode *inode = file_inode(file);
2396 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2398 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2405 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2408 if (!CIFS_CACHE_READ(CIFS_I(inode))) {
2409 rc = cifs_zap_mapping(inode);
2411 cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
2412 rc = 0; /* don't care about it in fsync */
2416 tcon = tlink_tcon(smbfile->tlink);
2417 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2418 server = tcon->ses->server;
2419 if (server->ops->flush)
2420 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2426 inode_unlock(inode);
2430 int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
2434 struct cifs_tcon *tcon;
2435 struct TCP_Server_Info *server;
2436 struct cifsFileInfo *smbfile = file->private_data;
2437 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
2438 struct inode *inode = file->f_mapping->host;
2440 rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
2447 cifs_dbg(FYI, "Sync file - name: %pD datasync: 0x%x\n",
2450 tcon = tlink_tcon(smbfile->tlink);
2451 if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
2452 server = tcon->ses->server;
2453 if (server->ops->flush)
2454 rc = server->ops->flush(xid, tcon, &smbfile->fid);
2460 inode_unlock(inode);
2465 * As the file closes, flush all cached write data for this inode,
2466 * checking for write-behind errors.
2468 int cifs_flush(struct file *file, fl_owner_t id)
2470 struct inode *inode = file_inode(file);
2473 if (file->f_mode & FMODE_WRITE)
2474 rc = filemap_write_and_wait(inode->i_mapping);
2476 cifs_dbg(FYI, "Flush inode %p file %p rc %d\n", inode, file, rc);
2482 cifs_write_allocate_pages(struct page **pages, unsigned long num_pages)
2487 for (i = 0; i < num_pages; i++) {
2488 pages[i] = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2491 * save number of pages we have already allocated and
2492 * return with ENOMEM error
2501 for (i = 0; i < num_pages; i++)
2508 size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
2513 clen = min_t(const size_t, len, wsize);
2514 num_pages = DIV_ROUND_UP(clen, PAGE_SIZE);
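/*
 * Worked example with hypothetical values, assuming PAGE_SIZE == 4096:
 * for wsize = 65536 and len = 200000, clen = min(200000, 65536) = 65536,
 * so num_pages = DIV_ROUND_UP(65536, 4096) = 16 and *cur_len is capped
 * at 65536 for this request.
 */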
2523 cifs_uncached_writedata_release(struct kref *refcount)
2526 struct cifs_writedata *wdata = container_of(refcount,
2527 struct cifs_writedata, refcount);
2529 for (i = 0; i < wdata->nr_pages; i++)
2530 put_page(wdata->pages[i]);
2531 cifs_writedata_release(refcount);
2535 cifs_uncached_writev_complete(struct work_struct *work)
2537 struct cifs_writedata *wdata = container_of(work,
2538 struct cifs_writedata, work);
2539 struct inode *inode = d_inode(wdata->cfile->dentry);
2540 struct cifsInodeInfo *cifsi = CIFS_I(inode);
2542 spin_lock(&inode->i_lock);
2543 cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
2544 if (cifsi->server_eof > inode->i_size)
2545 i_size_write(inode, cifsi->server_eof);
2546 spin_unlock(&inode->i_lock);
2548 complete(&wdata->done);
2550 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
2554 wdata_fill_from_iovec(struct cifs_writedata *wdata, struct iov_iter *from,
2555 size_t *len, unsigned long *num_pages)
2557 size_t save_len, copied, bytes, cur_len = *len;
2558 unsigned long i, nr_pages = *num_pages;
2561 for (i = 0; i < nr_pages; i++) {
2562 bytes = min_t(const size_t, cur_len, PAGE_SIZE);
2563 copied = copy_page_from_iter(wdata->pages[i], 0, bytes, from);
2566 * If we didn't copy as much as we expected, then that
2567 * may mean we trod into an unmapped area. Stop copying
2568 * at that point. On the next pass through the big
2569 * loop, we'll likely end up getting a zero-length
2570 * write and bailing out of it.
2575 cur_len = save_len - cur_len;
2579 * If we have no data to send, then that probably means that
2580 * the copy above failed altogether. That's most likely because
2581 * the address in the iovec was bogus. Return -EFAULT and let
2582 * the caller free anything we allocated and bail out.
2588 * i + 1 now represents the number of pages we actually used in
2589 * the copy phase above.
2596 cifs_write_from_iter(loff_t offset, size_t len, struct iov_iter *from,
2597 struct cifsFileInfo *open_file,
2598 struct cifs_sb_info *cifs_sb, struct list_head *wdata_list)
2602 unsigned long nr_pages, num_pages, i;
2603 struct cifs_writedata *wdata;
2604 struct iov_iter saved_from = *from;
2605 loff_t saved_offset = offset;
2607 struct TCP_Server_Info *server;
2609 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2610 pid = open_file->pid;
2612 pid = current->tgid;
2614 server = tlink_tcon(open_file->tlink)->ses->server;
2617 unsigned int wsize, credits;
2619 rc = server->ops->wait_mtu_credits(server, cifs_sb->wsize,
2624 nr_pages = get_numpages(wsize, len, &cur_len);
2625 wdata = cifs_writedata_alloc(nr_pages,
2626 cifs_uncached_writev_complete);
2629 add_credits_and_wake_if(server, credits, 0);
2633 rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
2636 add_credits_and_wake_if(server, credits, 0);
2640 num_pages = nr_pages;
2641 rc = wdata_fill_from_iovec(wdata, from, &cur_len, &num_pages);
2643 for (i = 0; i < nr_pages; i++)
2644 put_page(wdata->pages[i]);
2646 add_credits_and_wake_if(server, credits, 0);
2651 * Bring nr_pages down to the number of pages we actually used,
2652 * and free any pages that we didn't use.
2654 for ( ; nr_pages > num_pages; nr_pages--)
2655 put_page(wdata->pages[nr_pages - 1]);
2657 wdata->sync_mode = WB_SYNC_ALL;
2658 wdata->nr_pages = nr_pages;
2659 wdata->offset = (__u64)offset;
2660 wdata->cfile = cifsFileInfo_get(open_file);
2662 wdata->bytes = cur_len;
2663 wdata->pagesz = PAGE_SIZE;
2664 wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
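/*
 * Example, assuming PAGE_SIZE == 4096: for cur_len = 10000 over
 * nr_pages = 3, the first two pages carry 4096 bytes each and the tail
 * page carries tailsz = 10000 - 2 * 4096 = 1808 bytes.
 */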
2665 wdata->credits = credits;
2667 if (!wdata->cfile->invalidHandle ||
2668 !(rc = cifs_reopen_file(wdata->cfile, false)))
2669 rc = server->ops->async_writev(wdata,
2670 cifs_uncached_writedata_release);
2672 add_credits_and_wake_if(server, wdata->credits, 0);
2673 kref_put(&wdata->refcount,
2674 cifs_uncached_writedata_release);
2675 if (rc == -EAGAIN) {
2677 iov_iter_advance(from, offset - saved_offset);
2683 list_add_tail(&wdata->list, wdata_list);
2691 ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
2693 struct file *file = iocb->ki_filp;
2694 ssize_t total_written = 0;
2695 struct cifsFileInfo *open_file;
2696 struct cifs_tcon *tcon;
2697 struct cifs_sb_info *cifs_sb;
2698 struct cifs_writedata *wdata, *tmp;
2699 struct list_head wdata_list;
2700 struct iov_iter saved_from = *from;
2704 * BB - optimize the path when signing is disabled: we could drop this
2705 * extra memory-to-memory copying and use iovec buffers to construct
2706 * the write request.
2709 rc = generic_write_checks(iocb, from);
2713 INIT_LIST_HEAD(&wdata_list);
2714 cifs_sb = CIFS_FILE_SB(file);
2715 open_file = file->private_data;
2716 tcon = tlink_tcon(open_file->tlink);
2718 if (!tcon->ses->server->ops->async_writev)
2721 rc = cifs_write_from_iter(iocb->ki_pos, iov_iter_count(from), from,
2722 open_file, cifs_sb, &wdata_list);
2725 * If at least one write was successfully sent, then discard any rc
2726 * value from the later writes. If the other write succeeds, then
2727 * we'll end up returning whatever was written. If it fails, then
2728 * we'll get a new rc value from that.
2730 if (!list_empty(&wdata_list))
2734 * Wait for and collect replies for any successful sends in order of
2735 * increasing offset. Once an error is hit or we get a fatal signal
2736 * while waiting, then return without waiting for any more replies.
2739 list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
2741 /* FIXME: freezable too? */
2742 rc = wait_for_completion_killable(&wdata->done);
2745 else if (wdata->result)
2748 total_written += wdata->bytes;
2750 /* resend call if it's a retryable error */
2751 if (rc == -EAGAIN) {
2752 struct list_head tmp_list;
2753 struct iov_iter tmp_from = saved_from;
2755 INIT_LIST_HEAD(&tmp_list);
2756 list_del_init(&wdata->list);
2758 iov_iter_advance(&tmp_from,
2759 wdata->offset - iocb->ki_pos);
2761 rc = cifs_write_from_iter(wdata->offset,
2762 wdata->bytes, &tmp_from,
2763 open_file, cifs_sb, &tmp_list);
2765 list_splice(&tmp_list, &wdata_list);
2767 kref_put(&wdata->refcount,
2768 cifs_uncached_writedata_release);
2772 list_del_init(&wdata->list);
2773 kref_put(&wdata->refcount, cifs_uncached_writedata_release);
2776 if (unlikely(!total_written))
2779 iocb->ki_pos += total_written;
2780 set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(file_inode(file))->flags);
2781 cifs_stats_bytes_written(tcon, total_written);
2782 return total_written;
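/*
 * In outline, the uncached write path above is: split the iov into
 * wsize-limited cifs_writedata chunks (cifs_write_from_iter), issue
 * each chunk asynchronously, then collect completions in order of
 * increasing offset, resending any chunk that failed with -EAGAIN, and
 * finally return the total byte count (or the first error if nothing
 * was written).
 */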
2786 cifs_writev(struct kiocb *iocb, struct iov_iter *from)
2788 struct file *file = iocb->ki_filp;
2789 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
2790 struct inode *inode = file->f_mapping->host;
2791 struct cifsInodeInfo *cinode = CIFS_I(inode);
2792 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
2796 * We need to hold the sem to be sure nobody modifies lock list
2797 * with a brlock that prevents writing.
2799 down_read(&cinode->lock_sem);
2802 rc = generic_write_checks(iocb, from);
2806 if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(from),
2807 server->vals->exclusive_lock_type, NULL,
2809 rc = __generic_file_write_iter(iocb, from);
2813 inode_unlock(inode);
2816 rc = generic_write_sync(iocb, rc);
2817 up_read(&cinode->lock_sem);
2822 cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
2824 struct inode *inode = file_inode(iocb->ki_filp);
2825 struct cifsInodeInfo *cinode = CIFS_I(inode);
2826 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
2827 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
2828 iocb->ki_filp->private_data;
2829 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
2832 written = cifs_get_writer(cinode);
2836 if (CIFS_CACHE_WRITE(cinode)) {
2837 if (cap_unix(tcon->ses) &&
2838 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
2839 && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
2840 written = generic_file_write_iter(iocb, from);
2843 written = cifs_writev(iocb, from);
2847 * For non-oplocked files in strict cache mode we need to write the data
2848 * to the server exactly from pos to pos+len-1 rather than flush all
2849 * affected pages, because flushing may cause an error with mandatory
2850 * locks on those pages but not on the region from pos to pos+len-1.
2852 written = cifs_user_writev(iocb, from);
2853 if (CIFS_CACHE_READ(cinode)) {
2855 * We have read level caching and we have just sent a write
2856 * request to the server thus making data in the cache stale.
2857 * Zap the cache and set oplock/lease level to NONE to avoid
2858 * reading stale data from the cache. All subsequent read
2859 * operations will read new data from the server.
2861 cifs_zap_mapping(inode);
2862 cifs_dbg(FYI, "Set Oplock/Lease to NONE for inode=%p after write\n",
2867 cifs_put_writer(cinode);
2871 static struct cifs_readdata *
2872 cifs_readdata_alloc(unsigned int nr_pages, work_func_t complete)
2874 struct cifs_readdata *rdata;
2876 rdata = kzalloc(sizeof(*rdata) + (sizeof(struct page *) * nr_pages),
2878 if (rdata != NULL) {
2879 kref_init(&rdata->refcount);
2880 INIT_LIST_HEAD(&rdata->list);
2881 init_completion(&rdata->done);
2882 INIT_WORK(&rdata->work, complete);
2889 cifs_readdata_release(struct kref *refcount)
2891 struct cifs_readdata *rdata = container_of(refcount,
2892 struct cifs_readdata, refcount);
2895 cifsFileInfo_put(rdata->cfile);
2901 cifs_read_allocate_pages(struct cifs_readdata *rdata, unsigned int nr_pages)
2907 for (i = 0; i < nr_pages; i++) {
2908 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2913 rdata->pages[i] = page;
2917 unsigned int nr_page_failed = i;
2919 for (i = 0; i < nr_page_failed; i++) {
2920 put_page(rdata->pages[i]);
2921 rdata->pages[i] = NULL;
2928 cifs_uncached_readdata_release(struct kref *refcount)
2930 struct cifs_readdata *rdata = container_of(refcount,
2931 struct cifs_readdata, refcount);
2934 for (i = 0; i < rdata->nr_pages; i++) {
2935 put_page(rdata->pages[i]);
2936 rdata->pages[i] = NULL;
2938 cifs_readdata_release(refcount);
2942 * cifs_readdata_to_iov - copy data from pages in response to an iovec
2943 * @rdata: the readdata response with list of pages holding data
2944 * @iter: destination for our data
2946 * This function copies data from a list of pages in a readdata response into
2947 * an array of iovecs. It will first calculate where the data should go
2948 * based on the info in the readdata and then copy the data into that spot.
2951 cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
2953 size_t remaining = rdata->got_bytes;
2956 for (i = 0; i < rdata->nr_pages; i++) {
2957 struct page *page = rdata->pages[i];
2958 size_t copy = min_t(size_t, remaining, PAGE_SIZE);
2961 if (unlikely(iter->type & ITER_PIPE)) {
2962 void *addr = kmap_atomic(page);
2964 written = copy_to_iter(addr, copy, iter);
2965 kunmap_atomic(addr);
2967 written = copy_page_to_iter(page, 0, copy, iter);
2968 remaining -= written;
2969 if (written < copy && iov_iter_count(iter) > 0)
2972 return remaining ? -EFAULT : 0;
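/*
 * Example, assuming PAGE_SIZE == 4096: with got_bytes = 6000 across two
 * pages, 4096 bytes are copied out of page 0 and the remaining 1904 out
 * of page 1. If the destination iovec is exhausted first, remaining
 * stays non-zero and the function returns -EFAULT.
 */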
2976 cifs_uncached_readv_complete(struct work_struct *work)
2978 struct cifs_readdata *rdata = container_of(work,
2979 struct cifs_readdata, work);
2981 complete(&rdata->done);
2982 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
2986 cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
2987 struct cifs_readdata *rdata, unsigned int len)
2991 unsigned int nr_pages = rdata->nr_pages;
2993 rdata->got_bytes = 0;
2994 rdata->tailsz = PAGE_SIZE;
2995 for (i = 0; i < nr_pages; i++) {
2996 struct page *page = rdata->pages[i];
3000 /* no need to hold page hostage */
3001 rdata->pages[i] = NULL;
3007 if (len >= PAGE_SIZE) {
3008 /* enough data to fill the page */
3012 zero_user(page, len, PAGE_SIZE - len);
3013 rdata->tailsz = len;
3016 result = cifs_read_page_from_socket(server, page, n);
3020 rdata->got_bytes += result;
3023 return rdata->got_bytes > 0 && result != -ECONNABORTED ?
3024 rdata->got_bytes : result;
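/*
 * Return convention above: a partial read still counts as progress, so
 * any bytes already received are reported to the caller unless the
 * connection was aborted (-ECONNABORTED), in which case the error wins.
 */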
3028 cifs_send_async_read(loff_t offset, size_t len, struct cifsFileInfo *open_file,
3029 struct cifs_sb_info *cifs_sb, struct list_head *rdata_list)
3031 struct cifs_readdata *rdata;
3032 unsigned int npages, rsize, credits;
3036 struct TCP_Server_Info *server;
3038 server = tlink_tcon(open_file->tlink)->ses->server;
3040 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3041 pid = open_file->pid;
3043 pid = current->tgid;
3046 rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
3051 cur_len = min_t(const size_t, len, rsize);
3052 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
3054 /* allocate a readdata struct */
3055 rdata = cifs_readdata_alloc(npages,
3056 cifs_uncached_readv_complete);
3058 add_credits_and_wake_if(server, credits, 0);
3063 rc = cifs_read_allocate_pages(rdata, npages);
3067 rdata->cfile = cifsFileInfo_get(open_file);
3068 rdata->nr_pages = npages;
3069 rdata->offset = offset;
3070 rdata->bytes = cur_len;
3072 rdata->pagesz = PAGE_SIZE;
3073 rdata->read_into_pages = cifs_uncached_read_into_pages;
3074 rdata->credits = credits;
3076 if (!rdata->cfile->invalidHandle ||
3077 !(rc = cifs_reopen_file(rdata->cfile, true)))
3078 rc = server->ops->async_readv(rdata);
3081 add_credits_and_wake_if(server, rdata->credits, 0);
3082 kref_put(&rdata->refcount,
3083 cifs_uncached_readdata_release);
3089 list_add_tail(&rdata->list, rdata_list);
3097 ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
3099 struct file *file = iocb->ki_filp;
3102 ssize_t total_read = 0;
3103 loff_t offset = iocb->ki_pos;
3104 struct cifs_sb_info *cifs_sb;
3105 struct cifs_tcon *tcon;
3106 struct cifsFileInfo *open_file;
3107 struct cifs_readdata *rdata, *tmp;
3108 struct list_head rdata_list;
3110 len = iov_iter_count(to);
3114 INIT_LIST_HEAD(&rdata_list);
3115 cifs_sb = CIFS_FILE_SB(file);
3116 open_file = file->private_data;
3117 tcon = tlink_tcon(open_file->tlink);
3119 if (!tcon->ses->server->ops->async_readv)
3122 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
3123 cifs_dbg(FYI, "attempting read on write only file instance\n");
3125 rc = cifs_send_async_read(offset, len, open_file, cifs_sb, &rdata_list);
3127 /* if at least one read request send succeeded, then reset rc */
3128 if (!list_empty(&rdata_list))
3131 len = iov_iter_count(to);
3132 /* the loop below should proceed in the order of increasing offsets */
3134 list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
3136 /* FIXME: freezable sleep too? */
3137 rc = wait_for_completion_killable(&rdata->done);
3140 else if (rdata->result == -EAGAIN) {
3141 /* resend call if it's a retryable error */
3142 struct list_head tmp_list;
3143 unsigned int got_bytes = rdata->got_bytes;
3145 list_del_init(&rdata->list);
3146 INIT_LIST_HEAD(&tmp_list);
3149 * We received part of the data and then a reconnect
3150 * happened -- fill the buffer and continue
3153 if (got_bytes && got_bytes < rdata->bytes) {
3154 rc = cifs_readdata_to_iov(rdata, to);
3156 kref_put(&rdata->refcount,
3157 cifs_uncached_readdata_release);
3162 rc = cifs_send_async_read(
3163 rdata->offset + got_bytes,
3164 rdata->bytes - got_bytes,
3165 rdata->cfile, cifs_sb,
3168 list_splice(&tmp_list, &rdata_list);
3170 kref_put(&rdata->refcount,
3171 cifs_uncached_readdata_release);
3173 } else if (rdata->result)
3176 rc = cifs_readdata_to_iov(rdata, to);
3178 /* if there was a short read -- discard anything left */
3179 if (rdata->got_bytes && rdata->got_bytes < rdata->bytes)
3182 list_del_init(&rdata->list);
3183 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
3186 total_read = len - iov_iter_count(to);
3188 cifs_stats_bytes_read(tcon, total_read);
3190 /* mask nodata case */
3195 iocb->ki_pos += total_read;
3202 cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
3204 struct inode *inode = file_inode(iocb->ki_filp);
3205 struct cifsInodeInfo *cinode = CIFS_I(inode);
3206 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
3207 struct cifsFileInfo *cfile = (struct cifsFileInfo *)
3208 iocb->ki_filp->private_data;
3209 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
3213 * In strict cache mode we need to read from the server every time
3214 * if we don't have a level II oplock, because the server can delay
3215 * the mtime change, so we can't decide whether to invalidate the
3216 * inode. We can also fail when reading pages if there are mandatory
3217 * locks on pages affected by this read but not on the region from
3218 * pos to pos+len-1.
3220 if (!CIFS_CACHE_READ(cinode))
3221 return cifs_user_readv(iocb, to);
3223 if (cap_unix(tcon->ses) &&
3224 (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
3225 ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
3226 return generic_file_read_iter(iocb, to);
3229 * We need to hold the sem to be sure nobody modifies lock list
3230 * with a brlock that prevents reading.
3232 down_read(&cinode->lock_sem);
3233 if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
3234 tcon->ses->server->vals->shared_lock_type,
3235 NULL, CIFS_READ_OP))
3236 rc = generic_file_read_iter(iocb, to);
3237 up_read(&cinode->lock_sem);
3242 cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset)
3245 unsigned int bytes_read = 0;
3246 unsigned int total_read;
3247 unsigned int current_read_size;
3249 struct cifs_sb_info *cifs_sb;
3250 struct cifs_tcon *tcon;
3251 struct TCP_Server_Info *server;
3254 struct cifsFileInfo *open_file;
3255 struct cifs_io_parms io_parms;
3256 int buf_type = CIFS_NO_BUFFER;
3260 cifs_sb = CIFS_FILE_SB(file);
3262 /* FIXME: set up handlers for larger reads and/or convert to async */
3263 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
3265 if (file->private_data == NULL) {
3270 open_file = file->private_data;
3271 tcon = tlink_tcon(open_file->tlink);
3272 server = tcon->ses->server;
3274 if (!server->ops->sync_read) {
3279 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3280 pid = open_file->pid;
3282 pid = current->tgid;
3284 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
3285 cifs_dbg(FYI, "attempting read on write only file instance\n");
3287 for (total_read = 0, cur_offset = read_data; read_size > total_read;
3288 total_read += bytes_read, cur_offset += bytes_read) {
3290 current_read_size = min_t(uint, read_size - total_read,
3293 * For Windows ME and 9x we do not want to request more
3294 * than was negotiated, since the server will refuse the read
3297 if (!(tcon->ses->capabilities &
3298 tcon->ses->server->vals->cap_large_files)) {
3299 current_read_size = min_t(uint,
3300 current_read_size, CIFSMaxBufSize);
3302 if (open_file->invalidHandle) {
3303 rc = cifs_reopen_file(open_file, true);
3308 io_parms.tcon = tcon;
3309 io_parms.offset = *offset;
3310 io_parms.length = current_read_size;
3311 rc = server->ops->sync_read(xid, &open_file->fid, &io_parms,
3312 &bytes_read, &cur_offset,
3314 } while (rc == -EAGAIN);
3316 if (rc || (bytes_read == 0)) {
3324 cifs_stats_bytes_read(tcon, total_read);
3325 *offset += bytes_read;
3333 * If the page is mmap'ed into a process' page tables, then we need to make
3334 * sure that it doesn't change while being written back.
3337 cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
3339 struct page *page = vmf->page;
3342 return VM_FAULT_LOCKED;
3345 static const struct vm_operations_struct cifs_file_vm_ops = {
3346 .fault = filemap_fault,
3347 .map_pages = filemap_map_pages,
3348 .page_mkwrite = cifs_page_mkwrite,
3351 int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
3354 struct inode *inode = file_inode(file);
3358 if (!CIFS_CACHE_READ(CIFS_I(inode)))
3359 rc = cifs_zap_mapping(inode);
3361 rc = generic_file_mmap(file, vma);
3363 vma->vm_ops = &cifs_file_vm_ops;
3369 int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
3375 rc = cifs_revalidate_file(file);
3377 cifs_dbg(FYI, "Validation prior to mmap failed, error=%d\n",
3380 rc = generic_file_mmap(file, vma);
3382 vma->vm_ops = &cifs_file_vm_ops;
3389 cifs_readv_complete(struct work_struct *work)
3391 unsigned int i, got_bytes;
3392 struct cifs_readdata *rdata = container_of(work,
3393 struct cifs_readdata, work);
3395 got_bytes = rdata->got_bytes;
3396 for (i = 0; i < rdata->nr_pages; i++) {
3397 struct page *page = rdata->pages[i];
3399 lru_cache_add_file(page);
3401 if (rdata->result == 0 ||
3402 (rdata->result == -EAGAIN && got_bytes)) {
3403 flush_dcache_page(page);
3404 SetPageUptodate(page);
3409 if (rdata->result == 0 ||
3410 (rdata->result == -EAGAIN && got_bytes))
3411 cifs_readpage_to_fscache(rdata->mapping->host, page);
3413 got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
3416 rdata->pages[i] = NULL;
3418 kref_put(&rdata->refcount, cifs_readdata_release);
3422 cifs_readpages_read_into_pages(struct TCP_Server_Info *server,
3423 struct cifs_readdata *rdata, unsigned int len)
3429 unsigned int nr_pages = rdata->nr_pages;
3431 /* determine the eof that the server (probably) has */
3432 eof = CIFS_I(rdata->mapping->host)->server_eof;
3433 eof_index = eof ? (eof - 1) >> PAGE_SHIFT : 0;
3434 cifs_dbg(FYI, "eof=%llu eof_index=%lu\n", eof, eof_index);
3436 rdata->got_bytes = 0;
3437 rdata->tailsz = PAGE_SIZE;
3438 for (i = 0; i < nr_pages; i++) {
3439 struct page *page = rdata->pages[i];
3440 size_t n = PAGE_SIZE;
3442 if (len >= PAGE_SIZE) {
3444 } else if (len > 0) {
3445 /* enough for partial page, fill and zero the rest */
3446 zero_user(page, len, PAGE_SIZE - len);
3447 n = rdata->tailsz = len;
3449 } else if (page->index > eof_index) {
3451 * The VFS will not try to do readahead past the
3452 * i_size, but it's possible that we have outstanding
3453 * writes with gaps in the middle and the i_size hasn't
3454 * caught up yet. Populate those with zeroed out pages
3455 * to prevent the VFS from repeatedly attempting to
3456 * fill them until the writes are flushed.
3458 zero_user(page, 0, PAGE_SIZE);
3459 lru_cache_add_file(page);
3460 flush_dcache_page(page);
3461 SetPageUptodate(page);
3464 rdata->pages[i] = NULL;
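/*
 * Example, assuming PAGE_SIZE == 4096: if the server's eof is 5000,
 * then eof_index = (5000 - 1) >> PAGE_SHIFT = 1, so any page with
 * index > 1 is zero-filled and marked uptodate here without reading
 * from the socket.
 */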
3468 /* no need to hold page hostage */
3469 lru_cache_add_file(page);
3472 rdata->pages[i] = NULL;
3477 result = cifs_read_page_from_socket(server, page, n);
3481 rdata->got_bytes += result;
3484 return rdata->got_bytes > 0 && result != -ECONNABORTED ?
3485 rdata->got_bytes : result;
3489 readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
3490 unsigned int rsize, struct list_head *tmplist,
3491 unsigned int *nr_pages, loff_t *offset, unsigned int *bytes)
3493 struct page *page, *tpage;
3494 unsigned int expected_index;
3496 gfp_t gfp = readahead_gfp_mask(mapping);
3498 INIT_LIST_HEAD(tmplist);
3500 page = list_entry(page_list->prev, struct page, lru);
3503 * Lock the page and put it in the cache. Since no one else
3504 * should have access to this page, we're safe to simply set
3505 * PG_locked without checking it first.
3507 __SetPageLocked(page);
3508 rc = add_to_page_cache_locked(page, mapping,
3511 /* give up if we can't stick it in the cache */
3513 __ClearPageLocked(page);
3517 /* move first page to the tmplist */
3518 *offset = (loff_t)page->index << PAGE_SHIFT;
3521 list_move_tail(&page->lru, tmplist);
3523 /* now try and add more pages onto the request */
3524 expected_index = page->index + 1;
3525 list_for_each_entry_safe_reverse(page, tpage, page_list, lru) {
3526 /* discontinuity? */
3527 if (page->index != expected_index)
3530 /* would this page push the read over the rsize? */
3531 if (*bytes + PAGE_SIZE > rsize)
3534 __SetPageLocked(page);
3535 rc = add_to_page_cache_locked(page, mapping, page->index, gfp);
3537 __ClearPageLocked(page);
3540 list_move_tail(&page->lru, tmplist);
3541 (*bytes) += PAGE_SIZE;
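/*
 * Example, assuming PAGE_SIZE == 4096: with rsize = 16384 at most four
 * contiguous pages are batched into a single read request; an index
 * discontinuity in page_list or hitting the rsize cap ends the batch
 * early.
 */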
3548 static int cifs_readpages(struct file *file, struct address_space *mapping,
3549 struct list_head *page_list, unsigned num_pages)
3553 struct list_head tmplist;
3554 struct cifsFileInfo *open_file = file->private_data;
3555 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
3556 struct TCP_Server_Info *server;
3560 * Reads as many pages as possible from fscache. Returns -ENOBUFS
3561 * immediately if the cookie is negative
3563 * After this point, every page in the list might have PG_fscache set,
3564 * so we will need to clean that up off of every page we don't use.
3566 rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
3571 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
3572 pid = open_file->pid;
3574 pid = current->tgid;
3577 server = tlink_tcon(open_file->tlink)->ses->server;
3579 cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n",
3580 __func__, file, mapping, num_pages);
3583 * Start with the page at end of list and move it to private
3584 * list. Do the same with any following pages until we hit
3585 * the rsize limit, hit an index discontinuity, or run out of
3586 * pages. Issue the async read and then start the loop again
3587 * until the list is empty.
3589 * Note that list order is important. The page_list is in
3590 * the order of declining indexes. When we put the pages in
3591 * the rdata->pages, then we want them in increasing order.
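/*
 * For example, if page_list arrives holding indexes 7, 6, 5, 4, the
 * reverse walk in readpages_get_pages() moves them onto tmplist as
 * 4, 5, 6, 7 -- the order in which the server response will fill them.
 */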
3593 while (!list_empty(page_list) && !err) {
3594 unsigned int i, nr_pages, bytes, rsize;
3596 struct page *page, *tpage;
3597 struct cifs_readdata *rdata;
3600 rc = server->ops->wait_mtu_credits(server, cifs_sb->rsize,
3606 * Give up immediately if rsize is too small to read an entire
3607 * page. The VFS will fall back to readpage. We should never
3608 * reach this point however since we set ra_pages to 0 when the
3609 * rsize is smaller than a cache page.
3611 if (unlikely(rsize < PAGE_SIZE)) {
3612 add_credits_and_wake_if(server, credits, 0);
3617 err = readpages_get_pages(mapping, page_list, rsize, &tmplist,
3618 &nr_pages, &offset, &bytes);
3620 add_credits_and_wake_if(server, credits, 0);
3624 rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
3626 /* best to give up if we're out of mem */
3627 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3628 list_del(&page->lru);
3629 lru_cache_add_file(page);
3634 add_credits_and_wake_if(server, credits, 0);
3638 rdata->cfile = cifsFileInfo_get(open_file);
3639 rdata->mapping = mapping;
3640 rdata->offset = offset;
3641 rdata->bytes = bytes;
3643 rdata->pagesz = PAGE_SIZE;
3644 rdata->read_into_pages = cifs_readpages_read_into_pages;
3645 rdata->credits = credits;
3647 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
3648 list_del(&page->lru);
3649 rdata->pages[rdata->nr_pages++] = page;
3652 if (!rdata->cfile->invalidHandle ||
3653 !(rc = cifs_reopen_file(rdata->cfile, true)))
3654 rc = server->ops->async_readv(rdata);
3656 add_credits_and_wake_if(server, rdata->credits, 0);
3657 for (i = 0; i < rdata->nr_pages; i++) {
3658 page = rdata->pages[i];
3659 lru_cache_add_file(page);
3663 /* Fallback to the readpage in error/reconnect cases */
3664 kref_put(&rdata->refcount, cifs_readdata_release);
3668 kref_put(&rdata->refcount, cifs_readdata_release);
3671 /* Any pages that have been shown to fscache but didn't get added to
3672 * the pagecache must be uncached before they get returned to the
3673 * allocator.
3674 */
3675 cifs_fscache_readpages_cancel(mapping->host, page_list);
3680 * cifs_readpage_worker must be called with the page pinned
3682 static int cifs_readpage_worker(struct file *file, struct page *page,
3688 /* Is the page cached? */
3689 rc = cifs_readpage_from_fscache(file_inode(file), page);
3693 read_data = kmap(page);
3694 /* for reads over a certain size we could initiate async read-ahead */
3696 rc = cifs_read(file, read_data, PAGE_SIZE, poffset);
3701 cifs_dbg(FYI, "Bytes read %d\n", rc);
3703 file_inode(file)->i_atime =
3704 current_time(file_inode(file));
3707 memset(read_data + rc, 0, PAGE_SIZE - rc);
3709 flush_dcache_page(page);
3710 SetPageUptodate(page);
3712 /* send this page to the cache */
3713 cifs_readpage_to_fscache(file_inode(file), page);
3725 static int cifs_readpage(struct file *file, struct page *page)
3727 loff_t offset = (loff_t)page->index << PAGE_SHIFT;
3733 if (file->private_data == NULL) {
3739 cifs_dbg(FYI, "readpage %p at offset %d 0x%x\n",
3740 page, (int)offset, (int)offset);
3742 rc = cifs_readpage_worker(file, page, &offset);
3748 static int is_inode_writable(struct cifsInodeInfo *cifs_inode)
3750 struct cifsFileInfo *open_file;
3751 struct cifs_tcon *tcon =
3752 cifs_sb_master_tcon(CIFS_SB(cifs_inode->vfs_inode.i_sb));
3754 spin_lock(&tcon->open_file_lock);
3755 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
3756 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
3757 spin_unlock(&tcon->open_file_lock);
3761 spin_unlock(&tcon->open_file_lock);
3765 /* We do not want to update the file size from the server for inodes
3766 open for write, to avoid races with writepage extending the file.
3767 In the future we could consider allowing a refresh of the inode
3768 only on increases in the file size, but this is tricky to do
3769 without racing with write-behind page caching in the current
3770 Linux kernel design */
3771 bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 end_of_file)
3776 if (is_inode_writable(cifsInode)) {
3777 /* This inode is open for write at least once */
3778 struct cifs_sb_info *cifs_sb;
3780 cifs_sb = CIFS_SB(cifsInode->vfs_inode.i_sb);
3781 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) {
3782 /* since there is no page cache to corrupt on direct I/O,
3783 we can change the size safely */
3787 if (i_size_read(&cifsInode->vfs_inode) < end_of_file)
3795 static int cifs_write_begin(struct file *file, struct address_space *mapping,
3796 loff_t pos, unsigned len, unsigned flags,
3797 struct page **pagep, void **fsdata)
3800 pgoff_t index = pos >> PAGE_SHIFT;
3801 loff_t offset = pos & (PAGE_SIZE - 1);
3802 loff_t page_start = pos & PAGE_MASK;
3807 cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len);
3810 page = grab_cache_page_write_begin(mapping, index, flags);
3816 if (PageUptodate(page))
3820 * If we write a full page it will be up to date, no need to read from
3821 * the server. If the write is short, we'll end up doing a sync write
3824 if (len == PAGE_SIZE)
3828 * optimize away the read when we have an oplock, and we're not
3829 * expecting to use any of the data we'd be reading in. That
3830 * is, when the page lies beyond the EOF, or straddles the EOF
3831 * and the write will cover all of the existing data.
3833 if (CIFS_CACHE_READ(CIFS_I(mapping->host))) {
3834 i_size = i_size_read(mapping->host);
3835 if (page_start >= i_size ||
3836 (offset == 0 && (pos + len) >= i_size)) {
3837 zero_user_segments(page, 0, offset,
3841 * PageChecked means that the parts of the page
3842 * to which we're not writing are considered up
3843 * to date. Once the data is copied to the
3844 * page, it can be set uptodate.
3846 SetPageChecked(page);
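/*
 * PageChecked set here is consumed by cifs_write_end() above: it marks
 * that the parts of the page we are not writing to are already valid
 * (zeroed), so once the copied data lands the page can be marked
 * uptodate without a read from the server.
 */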
3851 if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) {
3853 * might as well read a page, it is fast enough. If we get
3854 * an error, we don't need to return it. cifs_write_end will
3855 * do a sync write instead since PG_uptodate isn't set.
3857 cifs_readpage_worker(file, page, &page_start);
3862 /* we could try using another file handle if there is one --
3863 but how would we lock it to prevent a close of that handle
3864 racing with this read? In any case, this will be written out
3865 by write_end, so it is fine */
3872 static int cifs_release_page(struct page *page, gfp_t gfp)
3874 if (PagePrivate(page))
3877 return cifs_fscache_release_page(page, gfp);
3880 static void cifs_invalidate_page(struct page *page, unsigned int offset,
3881 unsigned int length)
3883 struct cifsInodeInfo *cifsi = CIFS_I(page->mapping->host);
3885 if (offset == 0 && length == PAGE_SIZE)
3886 cifs_fscache_invalidate_page(page, &cifsi->vfs_inode);
3889 static int cifs_launder_page(struct page *page)
3892 loff_t range_start = page_offset(page);
3893 loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
3894 struct writeback_control wbc = {
3895 .sync_mode = WB_SYNC_ALL,
3897 .range_start = range_start,
3898 .range_end = range_end,
3901 cifs_dbg(FYI, "Launder page: %p\n", page);
3903 if (clear_page_dirty_for_io(page))
3904 rc = cifs_writepage_locked(page, &wbc);
3906 cifs_fscache_invalidate_page(page, page->mapping->host);
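/*
 * Oplock break handling, in outline: wait for any pending writers, let
 * the protocol ops downgrade the cached oplock/lease state, flush dirty
 * pages (and zap the cache if read caching was lost), re-push byte-range
 * locks, and finally acknowledge the break to the server.
 */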
3910 void cifs_oplock_break(struct work_struct *work)
3912 struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
3914 struct inode *inode = d_inode(cfile->dentry);
3915 struct cifsInodeInfo *cinode = CIFS_I(inode);
3916 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
3917 struct TCP_Server_Info *server = tcon->ses->server;
3919 bool purge_cache = false;
3921 wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
3922 TASK_UNINTERRUPTIBLE);
3924 server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
3925 cfile->oplock_epoch, &purge_cache);
3927 if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
3928 cifs_has_mand_locks(cinode)) {
3929 cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
3934 if (inode && S_ISREG(inode->i_mode)) {
3935 if (CIFS_CACHE_READ(cinode))
3936 break_lease(inode, O_RDONLY);
3938 break_lease(inode, O_WRONLY);
3939 rc = filemap_fdatawrite(inode->i_mapping);
3940 if (!CIFS_CACHE_READ(cinode) || purge_cache) {
3941 rc = filemap_fdatawait(inode->i_mapping);
3942 mapping_set_error(inode->i_mapping, rc);
3943 cifs_zap_mapping(inode);
3945 cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
3946 if (CIFS_CACHE_WRITE(cinode))
3947 goto oplock_break_ack;
3950 rc = cifs_push_locks(cfile);
3952 cifs_dbg(VFS, "Push locks rc = %d\n", rc);
3956 * Releasing a stale oplock after a recent reconnect of the SMB session,
3957 * using a now-incorrect file handle, is not a data integrity issue, but
3958 * do not bother sending an oplock release if the session to the server
3959 * is still disconnected, since the oplock was already released by the
3960 * server in that case.
3961 if (!cfile->oplock_break_cancelled) {
3962 rc = tcon->ses->server->ops->oplock_response(tcon, &cfile->fid,
3964 cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
3966 _cifsFileInfo_put(cfile, false /* do not wait for ourself */);
3967 cifs_done_oplock_break(cinode);
3971 * The presence of cifs_direct_io() in the address space ops vector
3972 * allows open() O_DIRECT flags which would have failed otherwise.
3973 *
3974 * In the non-cached mode (mount with cache=none), we shunt off direct
3975 * read and write requests, so this method should never be called.
3976 *
3977 * Direct I/O is not yet supported in the cached mode.
3980 cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter)
3984 * Eventually we need to support direct I/O for non-forcedirectio mounts
3990 const struct address_space_operations cifs_addr_ops = {
3991 .readpage = cifs_readpage,
3992 .readpages = cifs_readpages,
3993 .writepage = cifs_writepage,
3994 .writepages = cifs_writepages,
3995 .write_begin = cifs_write_begin,
3996 .write_end = cifs_write_end,
3997 .set_page_dirty = __set_page_dirty_nobuffers,
3998 .releasepage = cifs_release_page,
3999 .direct_IO = cifs_direct_io,
4000 .invalidatepage = cifs_invalidate_page,
4001 .launder_page = cifs_launder_page,
4005 * cifs_readpages requires the server to support a buffer large enough to
4006 * contain the header plus one complete page of data. Otherwise, we need
4007 * to leave cifs_readpages out of the address space operations.
4009 const struct address_space_operations cifs_addr_ops_smallbuf = {
4010 .readpage = cifs_readpage,
4011 .writepage = cifs_writepage,
4012 .writepages = cifs_writepages,
4013 .write_begin = cifs_write_begin,
4014 .write_end = cifs_write_end,
4015 .set_page_dirty = __set_page_dirty_nobuffers,
4016 .releasepage = cifs_release_page,
4017 .invalidatepage = cifs_invalidate_page,
4018 .launder_page = cifs_launder_page,