1 // SPDX-License-Identifier: LGPL-2.1
4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
7 * Common Internet FileSystem (CIFS) client
11 /* Note that BB means BUGBUG (ie something to fix eventually) */
13 #include <linux/module.h>
15 #include <linux/filelock.h>
16 #include <linux/mount.h>
17 #include <linux/slab.h>
18 #include <linux/init.h>
19 #include <linux/list.h>
20 #include <linux/seq_file.h>
21 #include <linux/vfs.h>
22 #include <linux/mempool.h>
23 #include <linux/delay.h>
24 #include <linux/kthread.h>
25 #include <linux/freezer.h>
26 #include <linux/namei.h>
27 #include <linux/random.h>
28 #include <linux/splice.h>
29 #include <linux/uuid.h>
30 #include <linux/xattr.h>
31 #include <uapi/linux/magic.h>
35 #define DECLARE_GLOBALS_HERE
37 #include "cifsproto.h"
38 #include "cifs_debug.h"
39 #include "cifs_fs_sb.h"
41 #include <linux/key-type.h>
42 #include "cifs_spnego.h"
44 #ifdef CONFIG_CIFS_DFS_UPCALL
45 #include "dfs_cache.h"
47 #ifdef CONFIG_CIFS_SWN_UPCALL
50 #include "fs_context.h"
51 #include "cached_dir.h"
54 * DOS dates from 1980/1/1 through 2107/12/31
55 * Protocol specifications indicate the range should be to 119, which
56 * limits maximum year to 2099. But this range has not been checked.
/*
 * Packed DOS date/time limits used when converting legacy server
 * timestamps: date is (year-1980)<<9 | month<<5 | day, time is
 * hour<<11 | minute<<5 | seconds/2 (29 == 58 seconds).
 */
58 #define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
59 #define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
60 #define SMB_TIME_MAX (23<<11 | 59<<5 | 29)
/* Global behavior toggles; several are exposed as module parameters below */
64 bool enable_oplocks = true;
65 bool linuxExtEnabled = true;
66 bool lookupCacheEnabled = true;
67 bool disable_legacy_dialects; /* false by default */
68 bool enable_gcm_256 = true;
69 bool require_gcm_256; /* false by default */
70 bool enable_negotiate_signing; /* false by default */
71 unsigned int global_secflags = CIFSSEC_DEF;
72 /* unsigned int ntlmv2_support = 0; */
73 unsigned int sign_CIFS_PDUs = 1;
76 * Global transaction id (XID) information
78 unsigned int GlobalCurrentXid; /* protected by GlobalMid_Sem */
79 unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */
80 unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Sem */
81 spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */
84 * Global counters, updated atomically
86 atomic_t sesInfoAllocCount;
87 atomic_t tconInfoAllocCount;
88 atomic_t tcpSesNextId;
89 atomic_t tcpSesAllocCount;
90 atomic_t tcpSesReconnectCount;
91 atomic_t tconInfoReconnectCount;
/* Buffer allocation counters; "total_" variants only kept for extra stats */
94 atomic_t buf_alloc_count;
95 atomic_t small_buf_alloc_count;
96 #ifdef CONFIG_CIFS_STATS2
97 atomic_t total_buf_alloc_count;
98 atomic_t total_small_buf_alloc_count;
/* List of all TCP sessions, protected by cifs_tcp_ses_lock */
100 struct list_head cifs_tcp_ses_list;
101 spinlock_t cifs_tcp_ses_lock;
102 static const struct super_operations cifs_super_ops;
/* Tunables readable (and some writable) via /sys/module/cifs/parameters */
103 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
104 module_param(CIFSMaxBufSize, uint, 0444);
105 MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
106 "for CIFS requests. "
107 "Default: 16384 Range: 8192 to 130048");
108 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
109 module_param(cifs_min_rcv, uint, 0444);
110 MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
112 unsigned int cifs_min_small = 30;
113 module_param(cifs_min_small, uint, 0444);
114 MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
116 unsigned int cifs_max_pending = CIFS_MAX_REQ;
117 module_param(cifs_max_pending, uint, 0444);
118 MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
119 "CIFS/SMB1 dialect (N/A for SMB3) "
120 "Default: 32767 Range: 2 to 32767.");
121 unsigned int dir_cache_timeout = 30;
122 module_param(dir_cache_timeout, uint, 0644);
123 MODULE_PARM_DESC(dir_cache_timeout, "Number of seconds to cache directory contents for which we have a lease. Default: 30 "
124 "Range: 1 to 65000 seconds, 0 to disable caching dir contents");
125 #ifdef CONFIG_CIFS_STATS2
126 unsigned int slow_rsp_threshold = 1;
127 module_param(slow_rsp_threshold, uint, 0644);
128 MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
129 "before logging that a response is delayed. "
130 "Default: 1 (if set to 0 disables msg).");
133 module_param(enable_oplocks, bool, 0644);
134 MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
136 module_param(enable_gcm_256, bool, 0644);
137 MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: n/N/0");
139 module_param(require_gcm_256, bool, 0644);
140 MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");
142 module_param(enable_negotiate_signing, bool, 0644);
143 MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");
145 module_param(disable_legacy_dialects, bool, 0644);
146 MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
147 "helpful to restrict the ability to "
148 "override the default dialects (SMB2.1, "
149 "SMB3 and SMB3.02) on mount with old "
150 "dialects (CIFS/SMB1 and SMB2) since "
151 "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
152 " and less secure. Default: n/N/0");
/* Mempools defined elsewhere in this file (see definitions further below) */
154 extern mempool_t *cifs_sm_req_poolp;
155 extern mempool_t *cifs_req_poolp;
156 extern mempool_t *cifs_mid_poolp;
/* Module-wide workqueues created at init time */
158 struct workqueue_struct *cifsiod_wq;
159 struct workqueue_struct *decrypt_wq;
160 struct workqueue_struct *fileinfo_put_wq;
161 struct workqueue_struct *cifsoplockd_wq;
162 struct workqueue_struct *deferredclose_wq;
163 struct workqueue_struct *serverclose_wq;
/* Random secret used when hashing lock owners (see cifs_lock_secret users) */
164 __u32 cifs_lock_secret;
167 * Bumps refcount for cifs super block.
168 * Note that it should be only called if a reference to VFS super block is
169 * already held, e.g. in open-type syscalls context. Otherwise it can race with
170 * atomic_dec_and_test in deactivate_locked_super.
173 cifs_sb_active(struct super_block *sb)
175 struct cifs_sb_info *server = CIFS_SB(sb);
/* first cifs-level reference also pins the VFS superblock (s_active) */
177 if (atomic_inc_return(&server->active) == 1)
178 atomic_inc(&sb->s_active);
/* Drop a cifs superblock reference; the last one releases the VFS sb */
182 cifs_sb_deactive(struct super_block *sb)
184 struct cifs_sb_info *server = CIFS_SB(sb);
186 if (atomic_dec_and_test(&server->active))
187 deactivate_super(sb);
/*
 * Fill in a freshly-allocated super_block at mount time: flags, size
 * limits, timestamp granularity/range, super_operations, bdi/readahead
 * tuning, and the root inode/dentry.
 */
191 cifs_read_super(struct super_block *sb)
194 struct cifs_sb_info *cifs_sb;
195 struct cifs_tcon *tcon;
196 struct timespec64 ts;
199 cifs_sb = CIFS_SB(sb);
200 tcon = cifs_sb_master_tcon(cifs_sb);
202 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
203 sb->s_flags |= SB_POSIXACL;
/* mounts of a snapshot are inherently read-only */
205 if (tcon->snapshot_time)
206 sb->s_flags |= SB_RDONLY;
208 if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
209 sb->s_maxbytes = MAX_LFS_FILESIZE;
211 sb->s_maxbytes = MAX_NON_LFS;
214 * Some very old servers like DOS and OS/2 used 2 second granularity
215 * (while all current servers use 100ns granularity - see MS-DTYP)
216 * but 1 second is the maximum allowed granularity for the VFS
217 * so for old servers set time granularity to 1 second while for
218 * everything else (current servers) set it to 100ns.
220 if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
221 ((tcon->ses->capabilities &
222 tcon->ses->server->vals->cap_nt_find) == 0) &&
224 sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
/* legacy DOS-format timestamps bound the representable time range */
225 ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
226 sb->s_time_min = ts.tv_sec;
227 ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
228 cpu_to_le16(SMB_TIME_MAX), 0);
229 sb->s_time_max = ts.tv_sec;
232 * Almost every server, including all SMB2+, uses DCE TIME
233 * ie 100 nanosecond units, since 1601. See MS-DTYP and MS-FSCC
235 sb->s_time_gran = 100;
236 ts = cifs_NTtimeToUnix(0);
237 sb->s_time_min = ts.tv_sec;
238 ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
239 sb->s_time_max = ts.tv_sec;
242 sb->s_magic = CIFS_SUPER_MAGIC;
243 sb->s_op = &cifs_super_ops;
244 sb->s_xattr = cifs_xattr_handlers;
245 rc = super_setup_bdi(sb);
248 /* tune readahead according to rsize if readahead size not set on mount */
249 if (cifs_sb->ctx->rsize == 0)
250 cifs_sb->ctx->rsize =
251 tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
252 if (cifs_sb->ctx->rasize)
253 sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
255 sb->s_bdi->ra_pages = 2 * (cifs_sb->ctx->rsize / PAGE_SIZE);
257 sb->s_blocksize = CIFS_MAX_MSGSIZE;
258 sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */
259 inode = cifs_root_iget(sb);
/* case-insensitive mounts need the case-folding dentry ops */
267 sb->s_d_op = &cifs_ci_dentry_ops;
269 sb->s_d_op = &cifs_dentry_ops;
271 sb->s_root = d_make_root(inode);
277 #ifdef CONFIG_CIFS_NFSD_EXPORT
278 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
279 cifs_dbg(FYI, "export ops supported\n");
280 sb->s_export_op = &cifs_export_ops;
282 #endif /* CONFIG_CIFS_NFSD_EXPORT */
287 cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
/* Tear down a cifs superblock: drop cached dentries, then unmount */
291 static void cifs_kill_sb(struct super_block *sb)
293 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
296 * We need to release all dentries for the cached directories
297 * before we kill the sb.
300 close_all_cached_dirs(cifs_sb);
302 /* finally release root dentry */
304 cifs_sb->root = NULL;
308 cifs_umount(cifs_sb);
/*
 * statfs(2) handler: fill in name-length limit, fsid and (via the
 * per-dialect queryfs op) block/space counts for the mounted share.
 */
312 cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
314 struct super_block *sb = dentry->d_sb;
315 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
316 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
317 struct TCP_Server_Info *server = tcon->ses->server;
/* prefer the server-reported path component limit when available */
323 if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
325 le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
327 buf->f_namelen = PATH_MAX;
329 buf->f_fsid.val[0] = tcon->vol_serial_number;
330 /* are using part of create time for more randomness, see man statfs */
331 buf->f_fsid.val[1] = (int)le64_to_cpu(tcon->vol_create_time);
333 buf->f_files = 0; /* undefined */
334 buf->f_ffree = 0; /* unlimited */
336 if (server->ops->queryfs)
337 rc = server->ops->queryfs(xid, tcon, cifs_sb, buf);
/* fallocate(2) handler: dispatch to the dialect-specific implementation */
343 static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
345 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
346 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
347 struct TCP_Server_Info *server = tcon->ses->server;
349 if (server->ops->fallocate)
350 return server->ops->fallocate(file, tcon, mode, off, len);
/*
 * inode permission check: with "noperm" mounted, skip client-side mode
 * checks (server enforces access) except that execute still requires an
 * execute bit; otherwise fall through to the generic VFS check.
 */
355 static int cifs_permission(struct mnt_idmap *idmap,
356 struct inode *inode, int mask)
358 struct cifs_sb_info *cifs_sb;
360 cifs_sb = CIFS_SB(inode->i_sb);
362 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
363 if ((mask & MAY_EXEC) && !execute_ok(inode))
367 } else /* file mode might have been restricted at mount time
368 on the client (above and beyond ACL on servers) for
369 servers which do not support setting and viewing mode bits,
370 so allowing client to check permissions is useful */
371 return generic_permission(&nop_mnt_idmap, inode, mask);
/* Slab caches and mempools for inodes, request buffers and mids */
374 static struct kmem_cache *cifs_inode_cachep;
375 static struct kmem_cache *cifs_req_cachep;
376 static struct kmem_cache *cifs_mid_cachep;
377 static struct kmem_cache *cifs_sm_req_cachep;
378 mempool_t *cifs_sm_req_poolp;
379 mempool_t *cifs_req_poolp;
380 mempool_t *cifs_mid_poolp;
/*
 * super_operations.alloc_inode: allocate and initialize a cifsInodeInfo
 * from the inode slab; oplock state starts at "none" until the server
 * grants one on open.
 */
382 static struct inode *
383 cifs_alloc_inode(struct super_block *sb)
385 struct cifsInodeInfo *cifs_inode;
386 cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
389 cifs_inode->cifsAttrs = 0x20; /* default */
390 cifs_inode->time = 0;
392 * Until the file is open and we have gotten oplock info back from the
393 * server, can not assume caching of file data or metadata.
395 cifs_set_oplock_level(cifs_inode, 0);
396 cifs_inode->lease_granted = false;
397 cifs_inode->flags = 0;
398 spin_lock_init(&cifs_inode->writers_lock);
399 cifs_inode->writers = 0;
400 cifs_inode->netfs.inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
401 cifs_inode->netfs.remote_i_size = 0;
402 cifs_inode->uniqueid = 0;
403 cifs_inode->createtime = 0;
404 cifs_inode->epoch = 0;
405 spin_lock_init(&cifs_inode->open_file_lock);
/* fresh lease key per inode so leases are distinguishable on the wire */
406 generate_random_uuid(cifs_inode->lease_key);
407 cifs_inode->symlink_target = NULL;
410 * Can not set i_flags here - they get immediately overwritten to zero
413 /* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
414 INIT_LIST_HEAD(&cifs_inode->openFileList);
415 INIT_LIST_HEAD(&cifs_inode->llist);
416 INIT_LIST_HEAD(&cifs_inode->deferred_closes);
417 spin_lock_init(&cifs_inode->deferred_lock);
418 return &cifs_inode->netfs.inode;
/* super_operations.free_inode: release symlink target (if any) and inode */
422 cifs_free_inode(struct inode *inode)
424 struct cifsInodeInfo *cinode = CIFS_I(inode);
/* symlink_target is only populated for symlinks */
426 if (S_ISLNK(inode->i_mode))
427 kfree(cinode->symlink_target);
428 kmem_cache_free(cifs_inode_cachep, cinode);
/* super_operations.evict_inode: drop pagecache and fscache cookie */
432 cifs_evict_inode(struct inode *inode)
434 truncate_inode_pages_final(&inode->i_data);
435 if (inode->i_state & I_PINNING_NETFS_WB)
436 cifs_fscache_unuse_inode_cookie(inode, true);
437 cifs_fscache_release_inode_cookie(inode);
/* Emit ",addr=..." (IPv4/IPv6 with optional scope id) for /proc/mounts */
442 cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
444 struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
445 struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
447 seq_puts(s, ",addr=");
449 switch (server->dstaddr.ss_family) {
451 seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
454 seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
455 if (sa6->sin6_scope_id)
456 seq_printf(s, "%%%u", sa6->sin6_scope_id);
459 seq_puts(s, "(unknown)");
/* note an smbdirect (RDMA) transport when in use */
462 seq_puts(s, ",rdma");
/* Emit ",sec=..." (and ",cruid=" for krb5) for /proc/mounts */
466 cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
468 if (ses->sectype == Unspecified) {
469 if (ses->user_name == NULL)
470 seq_puts(s, ",sec=none");
474 seq_puts(s, ",sec=");
476 switch (ses->sectype) {
478 seq_puts(s, "ntlmv2");
484 seq_puts(s, "ntlmssp");
487 /* shouldn't ever happen */
488 seq_puts(s, "unknown");
/* kerberos mounts also record the credential-owning uid */
495 if (ses->sectype == Kerberos)
496 seq_printf(s, ",cruid=%u",
497 from_kuid_munged(&init_user_ns, ses->cred_uid));
/* Emit ",cache=..." reflecting the mount's caching mode for /proc/mounts */
501 cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
503 seq_puts(s, ",cache=");
505 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
506 seq_puts(s, "strict");
507 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
509 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
510 seq_puts(s, "singleclient"); /* assume only one client access */
511 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
512 seq_puts(s, "ro"); /* read only caching assumed */
514 seq_puts(s, "loose");
518 * cifs_show_devname() is used so we show the mount device name with correct
519 * format (e.g. forward slashes vs. back slashes) in /proc/mounts
521 static int cifs_show_devname(struct seq_file *m, struct dentry *root)
523 struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
/* work on a copy: convert_delimiter() modifies the string in place */
524 char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);
529 convert_delimiter(devname, '/');
530 /* escape all spaces in share names */
531 seq_escape(m, devname, " \t");
538 * cifs_show_options() is for displaying mount options in /proc/mounts.
539 * Not all settable options are displayed but most of the important
543 cifs_show_options(struct seq_file *s, struct dentry *root)
545 struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
546 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
547 struct sockaddr *srcaddr;
548 srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
/* dialect, security and caching flavor come first */
550 seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
551 cifs_show_security(s, tcon->ses);
552 cifs_show_cache_flavor(s, cifs_sb);
555 seq_puts(s, ",nolease");
556 if (cifs_sb->ctx->multiuser)
557 seq_puts(s, ",multiuser");
558 else if (tcon->ses->user_name)
559 seq_show_option(s, "username", tcon->ses->user_name);
561 if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
562 seq_show_option(s, "domain", tcon->ses->domainName);
/* source address, if one was bound at mount time */
564 if (srcaddr->sa_family != AF_UNSPEC) {
565 struct sockaddr_in *saddr4;
566 struct sockaddr_in6 *saddr6;
567 saddr4 = (struct sockaddr_in *)srcaddr;
568 saddr6 = (struct sockaddr_in6 *)srcaddr;
569 if (srcaddr->sa_family == AF_INET6)
570 seq_printf(s, ",srcaddr=%pI6c",
572 else if (srcaddr->sa_family == AF_INET)
573 seq_printf(s, ",srcaddr=%pI4",
574 &saddr4->sin_addr.s_addr);
576 seq_printf(s, ",srcaddr=BAD-AF:%i",
577 (int)(srcaddr->sa_family));
/* ownership / mode overrides */
580 seq_printf(s, ",uid=%u",
581 from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
582 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
583 seq_puts(s, ",forceuid");
585 seq_puts(s, ",noforceuid");
587 seq_printf(s, ",gid=%u",
588 from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
589 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
590 seq_puts(s, ",forcegid");
592 seq_puts(s, ",noforcegid");
594 cifs_show_address(s, tcon->ses->server);
597 seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
598 cifs_sb->ctx->file_mode,
599 cifs_sb->ctx->dir_mode);
600 if (cifs_sb->ctx->iocharset)
601 seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
603 seq_puts(s, ",seal");
604 else if (tcon->ses->server->ignore_signature)
605 seq_puts(s, ",signloosely");
607 seq_puts(s, ",nocase");
609 seq_puts(s, ",nodelete");
610 if (cifs_sb->ctx->no_sparse)
611 seq_puts(s, ",nosparse");
612 if (tcon->local_lease)
613 seq_puts(s, ",locallease");
615 seq_puts(s, ",hard");
617 seq_puts(s, ",soft");
618 if (tcon->use_persistent)
619 seq_puts(s, ",persistenthandles");
620 else if (tcon->use_resilient)
621 seq_puts(s, ",resilienthandles");
622 if (tcon->posix_extensions)
623 seq_puts(s, ",posix");
624 else if (tcon->unix_ext)
625 seq_puts(s, ",unix");
627 seq_puts(s, ",nounix");
/* boolean flags stored in mnt_cifs_flags */
628 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
629 seq_puts(s, ",nodfs");
630 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
631 seq_puts(s, ",posixpaths");
632 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
633 seq_puts(s, ",setuids");
634 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
635 seq_puts(s, ",idsfromsid");
636 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
637 seq_puts(s, ",serverino");
638 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
639 seq_puts(s, ",rwpidforward");
640 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
641 seq_puts(s, ",forcemand");
642 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
643 seq_puts(s, ",nouser_xattr");
644 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
645 seq_puts(s, ",mapchars");
646 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
647 seq_puts(s, ",mapposix");
648 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
650 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
651 seq_puts(s, ",nobrl");
652 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
653 seq_puts(s, ",nohandlecache");
654 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
655 seq_puts(s, ",modefromsid");
656 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
657 seq_puts(s, ",cifsacl");
658 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
659 seq_puts(s, ",dynperm");
660 if (root->d_sb->s_flags & SB_POSIXACL)
662 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
663 seq_puts(s, ",mfsymlinks");
664 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
666 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
667 seq_puts(s, ",nostrictsync");
668 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
669 seq_puts(s, ",noperm");
670 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
671 seq_printf(s, ",backupuid=%u",
672 from_kuid_munged(&init_user_ns,
673 cifs_sb->ctx->backupuid));
674 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
675 seq_printf(s, ",backupgid=%u",
676 from_kgid_munged(&init_user_ns,
677 cifs_sb->ctx->backupgid));
/* numeric I/O tunables */
679 seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
680 seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
681 seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
682 if (cifs_sb->ctx->rasize)
683 seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
684 if (tcon->ses->server->min_offload)
685 seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
686 if (tcon->ses->server->retrans)
687 seq_printf(s, ",retrans=%u", tcon->ses->server->retrans);
688 seq_printf(s, ",echo_interval=%lu",
689 tcon->ses->server->echo_interval / HZ);
691 /* Only display the following if overridden on mount */
692 if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
693 seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
694 if (tcon->ses->server->tcp_nodelay)
695 seq_puts(s, ",tcpnodelay");
696 if (tcon->ses->server->noautotune)
697 seq_puts(s, ",noautotune");
698 if (tcon->ses->server->noblocksnd)
699 seq_puts(s, ",noblocksend");
700 if (tcon->ses->server->nosharesock)
701 seq_puts(s, ",nosharesock");
703 if (tcon->snapshot_time)
704 seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
705 if (tcon->handle_timeout)
706 seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
707 if (tcon->max_cached_dirs != MAX_CACHED_FIDS)
708 seq_printf(s, ",max_cached_dirs=%u", tcon->max_cached_dirs);
711 * Display file and directory attribute timeout in seconds.
712 * If file and directory attribute timeout the same then actimeo
713 * was likely specified on mount
715 if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
716 seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
718 seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
719 seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
721 seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);
723 if (tcon->ses->chan_max > 1)
724 seq_printf(s, ",multichannel,max_channels=%zu",
725 tcon->ses->chan_max);
727 if (tcon->use_witness)
728 seq_puts(s, ",witness");
/*
 * super_operations.umount_begin (umount -f): if this mount holds the
 * only reference to the tcon, close deferred files and wake all waiters
 * so in-flight requests can bail out before the actual unmount.
 */
733 static void cifs_umount_begin(struct super_block *sb)
735 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
736 struct cifs_tcon *tcon;
741 tcon = cifs_sb_master_tcon(cifs_sb);
743 spin_lock(&cifs_tcp_ses_lock);
744 spin_lock(&tcon->tc_lock);
745 if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
746 /* we have other mounts to same share or we have
747 already tried to umount this and woken up
748 all waiting network requests, nothing to do */
749 spin_unlock(&tcon->tc_lock);
750 spin_unlock(&cifs_tcp_ses_lock);
754 * can not set tcon->status to TID_EXITING yet since we don't know if umount -f will
755 * fail later (e.g. due to open files). TID_EXITING will be set just before tdis req sent
757 spin_unlock(&tcon->tc_lock);
758 spin_unlock(&cifs_tcp_ses_lock);
760 cifs_close_all_deferred_files(tcon);
761 /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
762 /* cancel_notify_requests(tcon); */
763 if (tcon->ses && tcon->ses->server) {
764 cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
765 wake_up_all(&tcon->ses->server->request_q);
766 wake_up_all(&tcon->ses->server->response_q);
767 msleep(1); /* yield */
768 /* we have to kick the requests once more */
769 wake_up_all(&tcon->ses->server->response_q);
/* super_operations.freeze_fs: flush deferred closes before freezing */
776 static int cifs_freeze(struct super_block *sb)
778 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
779 struct cifs_tcon *tcon;
784 tcon = cifs_sb_master_tcon(cifs_sb);
786 cifs_close_all_deferred_files(tcon);
790 #ifdef CONFIG_CIFS_STATS2
/* super_operations.show_stats hook (per-mount statistics), stats builds only */
791 static int cifs_show_stats(struct seq_file *s, struct dentry *root)
/* super_operations.write_inode: delegate writeback pinning to netfs */
798 static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
800 return netfs_unpin_writeback(inode, wbc);
/* super_operations.drop_inode: evict eagerly unless serverino keeps
 * stable server inode numbers worth caching */
803 static int cifs_drop_inode(struct inode *inode)
805 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
807 /* no serverino => unconditional eviction */
808 return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
809 generic_drop_inode(inode);
/* VFS superblock operations table for cifs/smb3 mounts */
812 static const struct super_operations cifs_super_ops = {
813 .statfs = cifs_statfs,
814 .alloc_inode = cifs_alloc_inode,
815 .write_inode = cifs_write_inode,
816 .free_inode = cifs_free_inode,
817 .drop_inode = cifs_drop_inode,
818 .evict_inode = cifs_evict_inode,
819 /* .show_path = cifs_show_path, */ /* Would we ever need show path? */
820 .show_devname = cifs_show_devname,
821 /* .delete_inode = cifs_delete_inode, */ /* Do not need above
822 function unless later we add lazy close of inodes or unless the
823 kernel forgets to call us with the same number of releases (closes)
825 .show_options = cifs_show_options,
826 .umount_begin = cifs_umount_begin,
827 .freeze_fs = cifs_freeze,
828 #ifdef CONFIG_CIFS_STATS2
829 .show_stats = cifs_show_stats,
834 * Get root dentry from superblock according to prefix path mount option.
835 * Return dentry with refcount + 1 on success and NULL otherwise.
837 static struct dentry *
838 cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
840 struct dentry *dentry;
841 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
842 char *full_path = NULL;
/* with a prefix path already baked into the sb, the sb root is the answer */
846 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
847 return dget(sb->s_root);
849 full_path = cifs_build_path_to_root(ctx, cifs_sb,
850 cifs_sb_master_tcon(cifs_sb), 0);
851 if (full_path == NULL)
852 return ERR_PTR(-ENOMEM);
854 cifs_dbg(FYI, "Get root dentry for %s\n", full_path);
856 sep = CIFS_DIR_SEP(cifs_sb);
857 dentry = dget(sb->s_root);
/* walk the prefix path component by component from the sb root */
861 struct inode *dir = d_inode(dentry);
862 struct dentry *child;
864 if (!S_ISDIR(dir->i_mode)) {
866 dentry = ERR_PTR(-ENOTDIR);
870 /* skip separators */
877 while (*s && *s != sep)
880 child = lookup_positive_unlocked(p, dentry, s - p);
883 } while (!IS_ERR(dentry));
/* sget() "set" callback: attach our cifs_sb_info to a new superblock */
888 static int cifs_set_super(struct super_block *sb, void *data)
890 struct cifs_mnt_data *mnt_data = data;
891 sb->s_fs_info = mnt_data->cifs_sb;
892 return set_anon_super(sb, NULL);
/*
 * Core mount path shared by the cifs and smb3 filesystem types:
 * duplicate the fs_context, connect to the server (cifs_mount), then
 * find-or-create a superblock via sget() and resolve the root dentry.
 */
896 cifs_smb3_do_mount(struct file_system_type *fs_type,
897 int flags, struct smb3_fs_context *old_ctx)
899 struct cifs_mnt_data mnt_data;
900 struct cifs_sb_info *cifs_sb;
901 struct super_block *sb;
906 cifs_dbg(FYI, "%s: devname=%s flags=0x%x\n", __func__,
907 old_ctx->source, flags);
909 cifs_info("Attempting to mount %s\n", old_ctx->source);
912 cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL);
914 return ERR_PTR(-ENOMEM);
916 cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
918 root = ERR_PTR(-ENOMEM);
921 rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
927 rc = cifs_setup_cifs_sb(cifs_sb);
933 rc = cifs_mount(cifs_sb, cifs_sb->ctx);
935 if (!(flags & SB_SILENT))
936 cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
942 mnt_data.ctx = cifs_sb->ctx;
943 mnt_data.cifs_sb = cifs_sb;
944 mnt_data.flags = flags;
946 /* BB should we make this contingent on mount parm? */
947 flags |= SB_NODIRATIME | SB_NOATIME;
949 sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
951 cifs_umount(cifs_sb);
/* matched an existing superblock: drop our duplicate connection state */
956 cifs_dbg(FYI, "Use existing superblock\n");
957 cifs_umount(cifs_sb);
960 rc = cifs_read_super(sb);
966 sb->s_flags |= SB_ACTIVE;
969 root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
974 cifs_sb->root = dget(root);
976 cifs_dbg(FYI, "dentry root is: %p\n", root);
980 deactivate_locked_super(sb);
983 kfree(cifs_sb->prepath);
984 smb3_cleanup_fs_context(cifs_sb->ctx);
/* Cached ("loose") read path: revalidate the pagecache mapping first,
 * but fall through to the uncached path for O_DIRECT */
991 cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
994 struct inode *inode = file_inode(iocb->ki_filp);
996 if (iocb->ki_flags & IOCB_DIRECT)
997 return cifs_user_readv(iocb, iter);
999 rc = cifs_revalidate_mapping(inode);
1003 return generic_file_read_iter(iocb, iter);
/*
 * write_iter handler: O_DIRECT writes go straight to the server (and
 * zap any cached read data); otherwise take the writer refcount and use
 * the generic pagecache write, flushing when we lack a write cache.
 */
1006 static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
1008 struct inode *inode = file_inode(iocb->ki_filp);
1009 struct cifsInodeInfo *cinode = CIFS_I(inode);
1013 if (iocb->ki_filp->f_flags & O_DIRECT) {
1014 written = cifs_user_writev(iocb, from);
1015 if (written > 0 && CIFS_CACHE_READ(cinode)) {
/* direct write invalidates any cached pages for this inode */
1016 cifs_zap_mapping(inode);
1018 "Set no oplock for inode=%p after a write operation\n",
1025 written = cifs_get_writer(cinode);
1029 written = generic_file_write_iter(iocb, from);
1031 if (CIFS_CACHE_WRITE(CIFS_I(inode)))
1034 rc = filemap_fdatawrite(inode->i_mapping);
1036 cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
1040 cifs_put_writer(cinode);
/*
 * llseek handler: for SEEK_END/SEEK_DATA/SEEK_HOLE the cached length may
 * be stale, so flush and revalidate from the server first; then defer to
 * a dialect-specific llseek op if one exists, else the generic one.
 */
1044 static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
1046 struct cifsFileInfo *cfile = file->private_data;
1047 struct cifs_tcon *tcon;
1050 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
1051 * the cached file length
1053 if (whence != SEEK_SET && whence != SEEK_CUR) {
1055 struct inode *inode = file_inode(file);
1058 * We need to be sure that all dirty pages are written and the
1059 * server has the newest file length.
1061 if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
1062 inode->i_mapping->nrpages != 0) {
1063 rc = filemap_fdatawait(inode->i_mapping);
1065 mapping_set_error(inode->i_mapping, rc);
1070 * Some applications poll for the file length in this strange
1071 * way so we must seek to end on non-oplocked files by
1072 * setting the revalidate time to zero.
1074 CIFS_I(inode)->time = 0;
1076 rc = cifs_revalidate_file_attr(file);
1080 if (cfile && cfile->tlink) {
1081 tcon = tlink_tcon(cfile->tlink);
1082 if (tcon->ses->server->ops->llseek)
1083 return tcon->ses->server->ops->llseek(file, tcon,
1086 return generic_file_llseek(file, offset, whence);
/*
 * setlease handler: only grant a local lease when our cached oplock
 * state (or the local_lease mount option) makes it safe to do so.
 */
1090 cifs_setlease(struct file *file, int arg, struct file_lock **lease, void **priv)
1093 * Note that this is called by vfs setlease with i_lock held to
1094 * protect *lease from going away.
1096 struct inode *inode = file_inode(file);
1097 struct cifsFileInfo *cfile = file->private_data;
/* leases only make sense on regular files */
1099 if (!(S_ISREG(inode->i_mode)))
1102 /* Check if file is oplocked if this is request for new lease */
1103 if (arg == F_UNLCK ||
1104 ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
1105 ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
1106 return generic_setlease(file, arg, lease, priv);
1107 else if (tlink_tcon(cfile->tlink)->local_lease &&
1108 !CIFS_CACHE_READ(CIFS_I(inode)))
1110 * If the server claims to support oplock on this file, then we
1111 * still need to check oplock even if the local_lease mount
1112 * option is set, but there are servers which do not support
1113 * oplock for which this mount option may be useful if the user
1114 * knows that the file won't be changed on the server by anyone
1117 return generic_setlease(file, arg, lease, priv);
/* Filesystem type registrations: "cifs" and "smb3" share the same
 * implementation (smb3 simply refuses the insecure SMB1 dialect) */
1122 struct file_system_type cifs_fs_type = {
1123 .owner = THIS_MODULE,
1125 .init_fs_context = smb3_init_fs_context,
1126 .parameters = smb3_fs_parameters,
1127 .kill_sb = cifs_kill_sb,
1128 .fs_flags = FS_RENAME_DOES_D_MOVE,
1130 MODULE_ALIAS_FS("cifs");
1132 struct file_system_type smb3_fs_type = {
1133 .owner = THIS_MODULE,
1135 .init_fs_context = smb3_init_fs_context,
1136 .parameters = smb3_fs_parameters,
1137 .kill_sb = cifs_kill_sb,
1138 .fs_flags = FS_RENAME_DOES_D_MOVE,
1140 MODULE_ALIAS_FS("smb3");
1141 MODULE_ALIAS("smb3");
/* inode operations for directories */
1143 const struct inode_operations cifs_dir_inode_ops = {
1144 .create = cifs_create,
1145 .atomic_open = cifs_atomic_open,
1146 .lookup = cifs_lookup,
1147 .getattr = cifs_getattr,
1148 .unlink = cifs_unlink,
1149 .link = cifs_hardlink,
1150 .mkdir = cifs_mkdir,
1151 .rmdir = cifs_rmdir,
1152 .rename = cifs_rename2,
1153 .permission = cifs_permission,
1154 .setattr = cifs_setattr,
1155 .symlink = cifs_symlink,
1156 .mknod = cifs_mknod,
1157 .listxattr = cifs_listxattr,
1158 .get_acl = cifs_get_acl,
1159 .set_acl = cifs_set_acl,
/* inode operations for regular files */
1162 const struct inode_operations cifs_file_inode_ops = {
1163 .setattr = cifs_setattr,
1164 .getattr = cifs_getattr,
1165 .permission = cifs_permission,
1166 .listxattr = cifs_listxattr,
1167 .fiemap = cifs_fiemap,
1168 .get_acl = cifs_get_acl,
1169 .set_acl = cifs_set_acl,
/*
 * get_link handler: return a heap copy of the cached symlink target
 * (freed via the delayed_call); refuses RCU-walk with -ECHILD since
 * we may allocate.
 */
1172 const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
1173 struct delayed_call *done)
1178 return ERR_PTR(-ECHILD);
1180 target_path = kmalloc(PATH_MAX, GFP_KERNEL);
1182 return ERR_PTR(-ENOMEM);
/* i_lock guards symlink_target against concurrent update */
1184 spin_lock(&inode->i_lock);
1185 if (likely(CIFS_I(inode)->symlink_target)) {
1186 strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
1189 target_path = ERR_PTR(-EOPNOTSUPP);
1191 spin_unlock(&inode->i_lock);
1193 if (!IS_ERR(target_path))
1194 set_delayed_call(done, kfree_link, target_path);
/* inode operations for symlinks */
1199 const struct inode_operations cifs_symlink_inode_ops = {
1200 .get_link = cifs_get_link,
1201 .setattr = cifs_setattr,
1202 .permission = cifs_permission,
1203 .listxattr = cifs_listxattr,
1207 * Advance the EOF marker to after the source range.
/* Used before a server-side copy so the destination range exists on the
 * server; falls back to flushing dirty pages if no writable handle. */
1209 static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *src_cifsi,
1210 struct cifs_tcon *src_tcon,
1211 unsigned int xid, loff_t src_end)
1213 struct cifsFileInfo *writeable_srcfile;
1216 writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY);
1217 if (writeable_srcfile) {
1218 if (src_tcon->ses->server->ops->set_file_size)
1219 rc = src_tcon->ses->server->ops->set_file_size(
1220 xid, src_tcon, writeable_srcfile,
1221 src_inode->i_size, true /* no need to set sparse */);
1224 cifsFileInfo_put(writeable_srcfile);
1225 cifs_dbg(FYI, "SetFSize for copychunk rc = %d\n", rc);
/* keep netfs/fscache notion of the file size in sync */
1231 netfs_resize_file(&src_cifsi->netfs, src_end, true);
1232 fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end);
1236 return filemap_write_and_wait(src_inode->i_mapping);
1240 * Flush out either the folio that overlaps the beginning of a range in which
1241 * pos resides or the folio that overlaps the end of a range unless that folio
1242 * is entirely within the range we're going to invalidate. We extend the flush
1243 * bounds to encompass the folio.
1245 static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, loff_t *_fend,
1248 struct folio *folio;
1249 unsigned long long fpos, fend;
1250 pgoff_t index = pos / PAGE_SIZE;
1254 folio = filemap_get_folio(inode->i_mapping, index);
1258 size = folio_size(folio);
1259 fpos = folio_pos(folio);
1260 fend = fpos + size - 1;
1261 *_fstart = min_t(unsigned long long, *_fstart, fpos);
1262 *_fend = max_t(unsigned long long, *_fend, fend);
1263 if ((first && pos == fpos) || (!first && pos == fend))
1266 rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend);
1272 static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
1273 struct file *dst_file, loff_t destoff, loff_t len,
1274 unsigned int remap_flags)
1276 struct inode *src_inode = file_inode(src_file);
1277 struct inode *target_inode = file_inode(dst_file);
1278 struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
1279 struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
1280 struct cifsFileInfo *smb_file_src = src_file->private_data;
1281 struct cifsFileInfo *smb_file_target = dst_file->private_data;
1282 struct cifs_tcon *target_tcon, *src_tcon;
1283 unsigned long long destend, fstart, fend, new_size;
1287 if (remap_flags & REMAP_FILE_DEDUP)
1289 if (remap_flags & ~REMAP_FILE_ADVISORY)
1292 cifs_dbg(FYI, "clone range\n");
1296 if (!smb_file_src || !smb_file_target) {
1298 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1302 src_tcon = tlink_tcon(smb_file_src->tlink);
1303 target_tcon = tlink_tcon(smb_file_target->tlink);
1306 * Note: cifs case is easier than btrfs since server responsible for
1307 * checks for proper open modes and file type and if it wants
1308 * server could even support copy of range where source = target
1310 lock_two_nondirectories(target_inode, src_inode);
1313 len = src_inode->i_size - off;
1315 cifs_dbg(FYI, "clone range\n");
1317 /* Flush the source buffer */
1318 rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
1323 /* The server-side copy will fail if the source crosses the EOF marker.
1324 * Advance the EOF marker after the flush above to the end of the range
1325 * if it's short of that.
1327 if (src_cifsi->netfs.remote_i_size < off + len) {
1328 rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
1333 new_size = destoff + len;
1334 destend = destoff + len - 1;
1336 /* Flush the folios at either end of the destination range to prevent
1337 * accidental loss of dirty data outside of the range.
1342 rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
1345 rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
1349 /* Discard all the folios that overlap the destination region. */
1350 cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
1351 truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
1353 fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
1354 i_size_read(target_inode), 0);
1357 if (target_tcon->ses->server->ops->duplicate_extents) {
1358 rc = target_tcon->ses->server->ops->duplicate_extents(xid,
1359 smb_file_src, smb_file_target, off, len, destoff);
1360 if (rc == 0 && new_size > i_size_read(target_inode)) {
1361 truncate_setsize(target_inode, new_size);
1362 netfs_resize_file(&target_cifsi->netfs, new_size, true);
1363 fscache_resize_cookie(cifs_inode_cookie(target_inode),
1368 /* force revalidate of size and timestamps of target file now
1369 that target is updated on the server */
1370 CIFS_I(target_inode)->time = 0;
1372 /* although unlocking in the reverse order from locking is not
1373 strictly necessary here it is a little cleaner to be consistent */
1374 unlock_two_nondirectories(src_inode, target_inode);
1377 return rc < 0 ? rc : len;
1380 ssize_t cifs_file_copychunk_range(unsigned int xid,
1381 struct file *src_file, loff_t off,
1382 struct file *dst_file, loff_t destoff,
1383 size_t len, unsigned int flags)
1385 struct inode *src_inode = file_inode(src_file);
1386 struct inode *target_inode = file_inode(dst_file);
1387 struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
1388 struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
1389 struct cifsFileInfo *smb_file_src;
1390 struct cifsFileInfo *smb_file_target;
1391 struct cifs_tcon *src_tcon;
1392 struct cifs_tcon *target_tcon;
1393 unsigned long long destend, fstart, fend;
1396 cifs_dbg(FYI, "copychunk range\n");
1398 if (!src_file->private_data || !dst_file->private_data) {
1400 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1405 smb_file_target = dst_file->private_data;
1406 smb_file_src = src_file->private_data;
1407 src_tcon = tlink_tcon(smb_file_src->tlink);
1408 target_tcon = tlink_tcon(smb_file_target->tlink);
1410 if (src_tcon->ses != target_tcon->ses) {
1411 cifs_dbg(VFS, "source and target of copy not on same server\n");
1416 if (!target_tcon->ses->server->ops->copychunk_range)
1420 * Note: cifs case is easier than btrfs since server responsible for
1421 * checks for proper open modes and file type and if it wants
1422 * server could even support copy of range where source = target
1424 lock_two_nondirectories(target_inode, src_inode);
1426 cifs_dbg(FYI, "about to flush pages\n");
1428 rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
1433 /* The server-side copy will fail if the source crosses the EOF marker.
1434 * Advance the EOF marker after the flush above to the end of the range
1435 * if it's short of that.
1437 if (src_cifsi->netfs.remote_i_size < off + len) {
1438 rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
1443 destend = destoff + len - 1;
1445 /* Flush the folios at either end of the destination range to prevent
1446 * accidental loss of dirty data outside of the range.
1451 rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
1454 rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
1458 /* Discard all the folios that overlap the destination region. */
1459 truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
1461 fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
1462 i_size_read(target_inode), 0);
1464 rc = file_modified(dst_file);
1466 rc = target_tcon->ses->server->ops->copychunk_range(xid,
1467 smb_file_src, smb_file_target, off, len, destoff);
1468 if (rc > 0 && destoff + rc > i_size_read(target_inode)) {
1469 truncate_setsize(target_inode, destoff + rc);
1470 netfs_resize_file(&target_cifsi->netfs,
1471 i_size_read(target_inode), true);
1472 fscache_resize_cookie(cifs_inode_cookie(target_inode),
1473 i_size_read(target_inode));
1475 if (rc > 0 && destoff + rc > target_cifsi->netfs.zero_point)
1476 target_cifsi->netfs.zero_point = destoff + rc;
1479 file_accessed(src_file);
1481 /* force revalidate of size and timestamps of target file now
1482 * that target is updated on the server
1484 CIFS_I(target_inode)->time = 0;
1487 /* although unlocking in the reverse order from locking is not
1488 * strictly necessary here it is a little cleaner to be consistent
1490 unlock_two_nondirectories(src_inode, target_inode);
1497 * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
1498 * is a dummy operation.
1500 static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1502 cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
1508 static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1509 struct file *dst_file, loff_t destoff,
1510 size_t len, unsigned int flags)
1512 unsigned int xid = get_xid();
1514 struct cifsFileInfo *cfile = dst_file->private_data;
1516 if (cfile->swapfile) {
1522 rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
1526 if (rc == -EOPNOTSUPP || rc == -EXDEV)
1527 rc = splice_copy_file_range(src_file, off, dst_file,
1532 const struct file_operations cifs_file_ops = {
1533 .read_iter = cifs_loose_read_iter,
1534 .write_iter = cifs_file_write_iter,
1536 .release = cifs_close,
1538 .flock = cifs_flock,
1539 .fsync = cifs_fsync,
1540 .flush = cifs_flush,
1541 .mmap = cifs_file_mmap,
1542 .splice_read = filemap_splice_read,
1543 .splice_write = iter_file_splice_write,
1544 .llseek = cifs_llseek,
1545 .unlocked_ioctl = cifs_ioctl,
1546 .copy_file_range = cifs_copy_file_range,
1547 .remap_file_range = cifs_remap_file_range,
1548 .setlease = cifs_setlease,
1549 .fallocate = cifs_fallocate,
1552 const struct file_operations cifs_file_strict_ops = {
1553 .read_iter = cifs_strict_readv,
1554 .write_iter = cifs_strict_writev,
1556 .release = cifs_close,
1558 .flock = cifs_flock,
1559 .fsync = cifs_strict_fsync,
1560 .flush = cifs_flush,
1561 .mmap = cifs_file_strict_mmap,
1562 .splice_read = filemap_splice_read,
1563 .splice_write = iter_file_splice_write,
1564 .llseek = cifs_llseek,
1565 .unlocked_ioctl = cifs_ioctl,
1566 .copy_file_range = cifs_copy_file_range,
1567 .remap_file_range = cifs_remap_file_range,
1568 .setlease = cifs_setlease,
1569 .fallocate = cifs_fallocate,
1572 const struct file_operations cifs_file_direct_ops = {
1573 .read_iter = cifs_direct_readv,
1574 .write_iter = cifs_direct_writev,
1576 .release = cifs_close,
1578 .flock = cifs_flock,
1579 .fsync = cifs_fsync,
1580 .flush = cifs_flush,
1581 .mmap = cifs_file_mmap,
1582 .splice_read = copy_splice_read,
1583 .splice_write = iter_file_splice_write,
1584 .unlocked_ioctl = cifs_ioctl,
1585 .copy_file_range = cifs_copy_file_range,
1586 .remap_file_range = cifs_remap_file_range,
1587 .llseek = cifs_llseek,
1588 .setlease = cifs_setlease,
1589 .fallocate = cifs_fallocate,
1592 const struct file_operations cifs_file_nobrl_ops = {
1593 .read_iter = cifs_loose_read_iter,
1594 .write_iter = cifs_file_write_iter,
1596 .release = cifs_close,
1597 .fsync = cifs_fsync,
1598 .flush = cifs_flush,
1599 .mmap = cifs_file_mmap,
1600 .splice_read = filemap_splice_read,
1601 .splice_write = iter_file_splice_write,
1602 .llseek = cifs_llseek,
1603 .unlocked_ioctl = cifs_ioctl,
1604 .copy_file_range = cifs_copy_file_range,
1605 .remap_file_range = cifs_remap_file_range,
1606 .setlease = cifs_setlease,
1607 .fallocate = cifs_fallocate,
1610 const struct file_operations cifs_file_strict_nobrl_ops = {
1611 .read_iter = cifs_strict_readv,
1612 .write_iter = cifs_strict_writev,
1614 .release = cifs_close,
1615 .fsync = cifs_strict_fsync,
1616 .flush = cifs_flush,
1617 .mmap = cifs_file_strict_mmap,
1618 .splice_read = filemap_splice_read,
1619 .splice_write = iter_file_splice_write,
1620 .llseek = cifs_llseek,
1621 .unlocked_ioctl = cifs_ioctl,
1622 .copy_file_range = cifs_copy_file_range,
1623 .remap_file_range = cifs_remap_file_range,
1624 .setlease = cifs_setlease,
1625 .fallocate = cifs_fallocate,
1628 const struct file_operations cifs_file_direct_nobrl_ops = {
1629 .read_iter = cifs_direct_readv,
1630 .write_iter = cifs_direct_writev,
1632 .release = cifs_close,
1633 .fsync = cifs_fsync,
1634 .flush = cifs_flush,
1635 .mmap = cifs_file_mmap,
1636 .splice_read = copy_splice_read,
1637 .splice_write = iter_file_splice_write,
1638 .unlocked_ioctl = cifs_ioctl,
1639 .copy_file_range = cifs_copy_file_range,
1640 .remap_file_range = cifs_remap_file_range,
1641 .llseek = cifs_llseek,
1642 .setlease = cifs_setlease,
1643 .fallocate = cifs_fallocate,
1646 const struct file_operations cifs_dir_ops = {
1647 .iterate_shared = cifs_readdir,
1648 .release = cifs_closedir,
1649 .read = generic_read_dir,
1650 .unlocked_ioctl = cifs_ioctl,
1651 .copy_file_range = cifs_copy_file_range,
1652 .remap_file_range = cifs_remap_file_range,
1653 .llseek = generic_file_llseek,
1654 .fsync = cifs_dir_fsync,
1658 cifs_init_once(void *inode)
1660 struct cifsInodeInfo *cifsi = inode;
1662 inode_init_once(&cifsi->netfs.inode);
1663 init_rwsem(&cifsi->lock_sem);
1667 cifs_init_inodecache(void)
1669 cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1670 sizeof(struct cifsInodeInfo),
1671 0, (SLAB_RECLAIM_ACCOUNT|
1672 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
1674 if (cifs_inode_cachep == NULL)
1681 cifs_destroy_inodecache(void)
1684 * Make sure all delayed rcu free inodes are flushed before we
1688 kmem_cache_destroy(cifs_inode_cachep);
1692 cifs_init_request_bufs(void)
1695 * SMB2 maximum header size is bigger than CIFS one - no problems to
1696 * allocate some more bytes for CIFS.
1698 size_t max_hdr_size = MAX_SMB2_HDR_SIZE;
1700 if (CIFSMaxBufSize < 8192) {
1701 /* Buffer size can not be smaller than 2 * PATH_MAX since maximum
1702 Unicode path name has to fit in any SMB/CIFS path based frames */
1703 CIFSMaxBufSize = 8192;
1704 } else if (CIFSMaxBufSize > 1024*127) {
1705 CIFSMaxBufSize = 1024 * 127;
1707 CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
1710 cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
1711 CIFSMaxBufSize, CIFSMaxBufSize);
1713 cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
1714 CIFSMaxBufSize + max_hdr_size, 0,
1715 SLAB_HWCACHE_ALIGN, 0,
1716 CIFSMaxBufSize + max_hdr_size,
1718 if (cifs_req_cachep == NULL)
1721 if (cifs_min_rcv < 1)
1723 else if (cifs_min_rcv > 64) {
1725 cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
1728 cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
1731 if (cifs_req_poolp == NULL) {
1732 kmem_cache_destroy(cifs_req_cachep);
1735 /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
1736 almost all handle based requests (but not write response, nor is it
1737 sufficient for path based requests). A smaller size would have
1738 been more efficient (compacting multiple slab items on one 4k page)
1739 for the case in which debug was on, but this larger size allows
1740 more SMBs to use small buffer alloc and is still much more
1741 efficient to alloc 1 per page off the slab compared to 17K (5page)
1742 alloc of large cifs buffers even when page debugging is on */
1743 cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
1744 MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
1745 0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
1746 if (cifs_sm_req_cachep == NULL) {
1747 mempool_destroy(cifs_req_poolp);
1748 kmem_cache_destroy(cifs_req_cachep);
1752 if (cifs_min_small < 2)
1754 else if (cifs_min_small > 256) {
1755 cifs_min_small = 256;
1756 cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
1759 cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
1760 cifs_sm_req_cachep);
1762 if (cifs_sm_req_poolp == NULL) {
1763 mempool_destroy(cifs_req_poolp);
1764 kmem_cache_destroy(cifs_req_cachep);
1765 kmem_cache_destroy(cifs_sm_req_cachep);
1773 cifs_destroy_request_bufs(void)
1775 mempool_destroy(cifs_req_poolp);
1776 kmem_cache_destroy(cifs_req_cachep);
1777 mempool_destroy(cifs_sm_req_poolp);
1778 kmem_cache_destroy(cifs_sm_req_cachep);
1781 static int init_mids(void)
1783 cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1784 sizeof(struct mid_q_entry), 0,
1785 SLAB_HWCACHE_ALIGN, NULL);
1786 if (cifs_mid_cachep == NULL)
1789 /* 3 is a reasonable minimum number of simultaneous operations */
1790 cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
1791 if (cifs_mid_poolp == NULL) {
1792 kmem_cache_destroy(cifs_mid_cachep);
1799 static void destroy_mids(void)
1801 mempool_destroy(cifs_mid_poolp);
1802 kmem_cache_destroy(cifs_mid_cachep);
1810 INIT_LIST_HEAD(&cifs_tcp_ses_list);
1812 * Initialize Global counters
1814 atomic_set(&sesInfoAllocCount, 0);
1815 atomic_set(&tconInfoAllocCount, 0);
1816 atomic_set(&tcpSesNextId, 0);
1817 atomic_set(&tcpSesAllocCount, 0);
1818 atomic_set(&tcpSesReconnectCount, 0);
1819 atomic_set(&tconInfoReconnectCount, 0);
1821 atomic_set(&buf_alloc_count, 0);
1822 atomic_set(&small_buf_alloc_count, 0);
1823 #ifdef CONFIG_CIFS_STATS2
1824 atomic_set(&total_buf_alloc_count, 0);
1825 atomic_set(&total_small_buf_alloc_count, 0);
1826 if (slow_rsp_threshold < 1)
1827 cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
1828 else if (slow_rsp_threshold > 32767)
1830 "slow response threshold set higher than recommended (0 to 32767)\n");
1831 #endif /* CONFIG_CIFS_STATS2 */
1833 atomic_set(&mid_count, 0);
1834 GlobalCurrentXid = 0;
1835 GlobalTotalActiveXid = 0;
1836 GlobalMaxActiveXid = 0;
1837 spin_lock_init(&cifs_tcp_ses_lock);
1838 spin_lock_init(&GlobalMid_Lock);
1840 cifs_lock_secret = get_random_u32();
1842 if (cifs_max_pending < 2) {
1843 cifs_max_pending = 2;
1844 cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
1845 } else if (cifs_max_pending > CIFS_MAX_REQ) {
1846 cifs_max_pending = CIFS_MAX_REQ;
1847 cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
1851 /* Limit max to about 18 hours, and setting to zero disables directory entry caching */
1852 if (dir_cache_timeout > 65000) {
1853 dir_cache_timeout = 65000;
1854 cifs_dbg(VFS, "dir_cache_timeout set to max of 65000 seconds\n");
1857 cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1860 goto out_clean_proc;
1864 * Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3)
1865 * so that we don't launch too many worker threads but
1866 * Documentation/core-api/workqueue.rst recommends setting it to 0
1869 /* WQ_UNBOUND allows decrypt tasks to run on any CPU */
1870 decrypt_wq = alloc_workqueue("smb3decryptd",
1871 WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1874 goto out_destroy_cifsiod_wq;
1877 fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
1878 WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1879 if (!fileinfo_put_wq) {
1881 goto out_destroy_decrypt_wq;
1884 cifsoplockd_wq = alloc_workqueue("cifsoplockd",
1885 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1886 if (!cifsoplockd_wq) {
1888 goto out_destroy_fileinfo_put_wq;
1891 deferredclose_wq = alloc_workqueue("deferredclose",
1892 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1893 if (!deferredclose_wq) {
1895 goto out_destroy_cifsoplockd_wq;
1898 serverclose_wq = alloc_workqueue("serverclose",
1899 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1900 if (!serverclose_wq) {
1902 goto out_destroy_serverclose_wq;
1905 rc = cifs_init_inodecache();
1907 goto out_destroy_deferredclose_wq;
1911 goto out_destroy_inodecache;
1913 rc = cifs_init_request_bufs();
1915 goto out_destroy_mids;
1917 #ifdef CONFIG_CIFS_DFS_UPCALL
1918 rc = dfs_cache_init();
1920 goto out_destroy_request_bufs;
1921 #endif /* CONFIG_CIFS_DFS_UPCALL */
1922 #ifdef CONFIG_CIFS_UPCALL
1923 rc = init_cifs_spnego();
1925 goto out_destroy_dfs_cache;
1926 #endif /* CONFIG_CIFS_UPCALL */
1927 #ifdef CONFIG_CIFS_SWN_UPCALL
1928 rc = cifs_genl_init();
1930 goto out_register_key_type;
1931 #endif /* CONFIG_CIFS_SWN_UPCALL */
1933 rc = init_cifs_idmap();
1935 goto out_cifs_swn_init;
1937 rc = register_filesystem(&cifs_fs_type);
1939 goto out_init_cifs_idmap;
1941 rc = register_filesystem(&smb3_fs_type);
1943 unregister_filesystem(&cifs_fs_type);
1944 goto out_init_cifs_idmap;
1949 out_init_cifs_idmap:
1952 #ifdef CONFIG_CIFS_SWN_UPCALL
1954 out_register_key_type:
1956 #ifdef CONFIG_CIFS_UPCALL
1958 out_destroy_dfs_cache:
1960 #ifdef CONFIG_CIFS_DFS_UPCALL
1961 dfs_cache_destroy();
1962 out_destroy_request_bufs:
1964 cifs_destroy_request_bufs();
1967 out_destroy_inodecache:
1968 cifs_destroy_inodecache();
1969 out_destroy_deferredclose_wq:
1970 destroy_workqueue(deferredclose_wq);
1971 out_destroy_cifsoplockd_wq:
1972 destroy_workqueue(cifsoplockd_wq);
1973 out_destroy_fileinfo_put_wq:
1974 destroy_workqueue(fileinfo_put_wq);
1975 out_destroy_decrypt_wq:
1976 destroy_workqueue(decrypt_wq);
1977 out_destroy_cifsiod_wq:
1978 destroy_workqueue(cifsiod_wq);
1979 out_destroy_serverclose_wq:
1980 destroy_workqueue(serverclose_wq);
1989 cifs_dbg(NOISY, "exit_smb3\n");
1990 unregister_filesystem(&cifs_fs_type);
1991 unregister_filesystem(&smb3_fs_type);
1992 cifs_release_automount_timer();
1994 #ifdef CONFIG_CIFS_SWN_UPCALL
1997 #ifdef CONFIG_CIFS_UPCALL
2000 #ifdef CONFIG_CIFS_DFS_UPCALL
2001 dfs_cache_destroy();
2003 cifs_destroy_request_bufs();
2005 cifs_destroy_inodecache();
2006 destroy_workqueue(deferredclose_wq);
2007 destroy_workqueue(cifsoplockd_wq);
2008 destroy_workqueue(decrypt_wq);
2009 destroy_workqueue(fileinfo_put_wq);
2010 destroy_workqueue(serverclose_wq);
2011 destroy_workqueue(cifsiod_wq);
2015 MODULE_AUTHOR("Steve French");
2016 MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */
2018 ("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
2019 "also older servers complying with the SNIA CIFS Specification)");
2020 MODULE_VERSION(CIFS_VERSION);
2021 MODULE_SOFTDEP("ecb");
2022 MODULE_SOFTDEP("hmac");
2023 MODULE_SOFTDEP("md5");
2024 MODULE_SOFTDEP("nls");
2025 MODULE_SOFTDEP("aes");
2026 MODULE_SOFTDEP("cmac");
2027 MODULE_SOFTDEP("sha256");
2028 MODULE_SOFTDEP("sha512");
2029 MODULE_SOFTDEP("aead2");
2030 MODULE_SOFTDEP("ccm");
2031 MODULE_SOFTDEP("gcm");
2032 module_init(init_cifs)
2033 module_exit(exit_cifs)