1 // SPDX-License-Identifier: LGPL-2.1
4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
7 * Common Internet FileSystem (CIFS) client
11 /* Note that BB means BUGBUG (ie something to fix eventually) */
13 #include <linux/module.h>
15 #include <linux/mount.h>
16 #include <linux/slab.h>
17 #include <linux/init.h>
18 #include <linux/list.h>
19 #include <linux/seq_file.h>
20 #include <linux/vfs.h>
21 #include <linux/mempool.h>
22 #include <linux/delay.h>
23 #include <linux/kthread.h>
24 #include <linux/freezer.h>
25 #include <linux/namei.h>
26 #include <linux/random.h>
27 #include <linux/uuid.h>
28 #include <linux/xattr.h>
29 #include <uapi/linux/magic.h>
33 #define DECLARE_GLOBALS_HERE
35 #include "cifsproto.h"
36 #include "cifs_debug.h"
37 #include "cifs_fs_sb.h"
39 #include <linux/key-type.h>
40 #include "cifs_spnego.h"
42 #ifdef CONFIG_CIFS_DFS_UPCALL
43 #include "dfs_cache.h"
45 #ifdef CONFIG_CIFS_SWN_UPCALL
48 #include "fs_context.h"
49 #include "cached_dir.h"
52 * DOS dates from 1980/1/1 through 2107/12/31
53 * Protocol specifications indicate the range should be to 119, which
54 * limits maximum year to 2099. But this range has not been checked.
/*
 * DOS date/time are bit-packed little-endian words:
 *   date = (year-1980)<<9 | month<<5 | day
 *   time = hours<<11 | minutes<<5 | seconds/2  (so 29 means 58-59 s)
 */
56 #define SMB_DATE_MAX (127<<9 | 12<<5 | 31)
57 #define SMB_DATE_MIN (0<<9 | 1<<5 | 1)
58 #define SMB_TIME_MAX (23<<11 | 59<<5 | 29)
/* Global behavior tunables; several are exposed as module parameters below. */
62 bool enable_oplocks = true;
63 bool linuxExtEnabled = true;
64 bool lookupCacheEnabled = true;
65 bool disable_legacy_dialects; /* false by default */
66 bool enable_gcm_256 = true;
67 bool require_gcm_256; /* false by default */
68 bool enable_negotiate_signing; /* false by default */
69 unsigned int global_secflags = CIFSSEC_DEF;
70 /* unsigned int ntlmv2_support = 0; */
71 unsigned int sign_CIFS_PDUs = 1;
74 * Global transaction id (XID) information
76 unsigned int GlobalCurrentXid; /* protected by GlobalMid_Sem */
77 unsigned int GlobalTotalActiveXid; /* prot by GlobalMid_Sem */
78 unsigned int GlobalMaxActiveXid; /* prot by GlobalMid_Sem */
79 spinlock_t GlobalMid_Lock; /* protects above & list operations on midQ entries */
82 * Global counters, updated atomically
84 atomic_t sesInfoAllocCount;
85 atomic_t tconInfoAllocCount;
86 atomic_t tcpSesNextId;
87 atomic_t tcpSesAllocCount;
88 atomic_t tcpSesReconnectCount;
89 atomic_t tconInfoReconnectCount;
/* current buffer-allocation counts; cumulative totals only with CIFS_STATS2 */
92 atomic_t buf_alloc_count;
93 atomic_t small_buf_alloc_count;
94 #ifdef CONFIG_CIFS_STATS2
95 atomic_t total_buf_alloc_count;
96 atomic_t total_small_buf_alloc_count;
/* list of all TCP server sessions; guarded by cifs_tcp_ses_lock */
98 struct list_head cifs_tcp_ses_list;
99 spinlock_t cifs_tcp_ses_lock;
/* Forward declaration; table is defined after the operations below. */
100 static const struct super_operations cifs_super_ops;
/* Module parameters: buffer sizing, pool depths, request limits. */
101 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
102 module_param(CIFSMaxBufSize, uint, 0444);
103 MODULE_PARM_DESC(CIFSMaxBufSize, "Network buffer size (not including header) "
104 "for CIFS requests. "
105 "Default: 16384 Range: 8192 to 130048");
106 unsigned int cifs_min_rcv = CIFS_MIN_RCV_POOL;
107 module_param(cifs_min_rcv, uint, 0444);
108 MODULE_PARM_DESC(cifs_min_rcv, "Network buffers in pool. Default: 4 Range: "
110 unsigned int cifs_min_small = 30;
111 module_param(cifs_min_small, uint, 0444);
112 MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
114 unsigned int cifs_max_pending = CIFS_MAX_REQ;
115 module_param(cifs_max_pending, uint, 0444);
116 MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server for "
117 "CIFS/SMB1 dialect (N/A for SMB3) "
118 "Default: 32767 Range: 2 to 32767.");
119 #ifdef CONFIG_CIFS_STATS2
120 unsigned int slow_rsp_threshold = 1;
121 module_param(slow_rsp_threshold, uint, 0644);
122 MODULE_PARM_DESC(slow_rsp_threshold, "Amount of time (in seconds) to wait "
123 "before logging that a response is delayed. "
124 "Default: 1 (if set to 0 disables msg).");
127 module_param(enable_oplocks, bool, 0644);
128 MODULE_PARM_DESC(enable_oplocks, "Enable or disable oplocks. Default: y/Y/1");
130 module_param(enable_gcm_256, bool, 0644);
131 MODULE_PARM_DESC(enable_gcm_256, "Enable requesting strongest (256 bit) GCM encryption. Default: n/N/0");
133 module_param(require_gcm_256, bool, 0644);
134 MODULE_PARM_DESC(require_gcm_256, "Require strongest (256 bit) GCM encryption. Default: n/N/0");
136 module_param(enable_negotiate_signing, bool, 0644);
137 MODULE_PARM_DESC(enable_negotiate_signing, "Enable negotiating packet signing algorithm with server. Default: n/N/0");
139 module_param(disable_legacy_dialects, bool, 0644);
140 MODULE_PARM_DESC(disable_legacy_dialects, "To improve security it may be "
141 "helpful to restrict the ability to "
142 "override the default dialects (SMB2.1, "
143 "SMB3 and SMB3.02) on mount with old "
144 "dialects (CIFS/SMB1 and SMB2) since "
145 "vers=1.0 (CIFS/SMB1) and vers=2.0 are weaker"
146 " and less secure. Default: n/N/0");
/* mempools defined near the slab caches below; declared extern elsewhere */
148 extern mempool_t *cifs_sm_req_poolp;
149 extern mempool_t *cifs_req_poolp;
150 extern mempool_t *cifs_mid_poolp;
/* module-wide workqueues used by I/O, decryption and handle teardown */
152 struct workqueue_struct *cifsiod_wq;
153 struct workqueue_struct *decrypt_wq;
154 struct workqueue_struct *fileinfo_put_wq;
155 struct workqueue_struct *cifsoplockd_wq;
156 struct workqueue_struct *deferredclose_wq;
157 struct workqueue_struct *serverclose_wq;
158 __u32 cifs_lock_secret;
161 * Bumps refcount for cifs super block.
162 * Note that it should be only called if a referece to VFS super block is
163 * already held, e.g. in open-type syscalls context. Otherwise it can race with
164 * atomic_dec_and_test in deactivate_locked_super.
167 cifs_sb_active(struct super_block *sb)
169 struct cifs_sb_info *server = CIFS_SB(sb);
171 if (atomic_inc_return(&server->active) == 1)
172 atomic_inc(&sb->s_active);
176 cifs_sb_deactive(struct super_block *sb)
178 struct cifs_sb_info *server = CIFS_SB(sb);
180 if (atomic_dec_and_test(&server->active))
181 deactivate_super(sb);
/*
 * Fill in a freshly created cifs superblock: flags, size limits, timestamp
 * granularity/range, block size, root inode and root dentry.
 * NOTE(review): this extraction is missing several lines (braces, rc checks,
 * error labels); comments below describe only what is visible here.
 */
185 cifs_read_super(struct super_block *sb)
188 struct cifs_sb_info *cifs_sb;
189 struct cifs_tcon *tcon;
190 struct timespec64 ts;
193 cifs_sb = CIFS_SB(sb);
194 tcon = cifs_sb_master_tcon(cifs_sb);
/* "posixacl" mount option -> advertise POSIX ACL support to the VFS */
196 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIXACL)
197 sb->s_flags |= SB_POSIXACL;
/* snapshot mounts are point-in-time views, force read-only */
199 if (tcon->snapshot_time)
200 sb->s_flags |= SB_RDONLY;
202 if (tcon->ses->capabilities & tcon->ses->server->vals->cap_large_files)
203 sb->s_maxbytes = MAX_LFS_FILESIZE;
205 sb->s_maxbytes = MAX_NON_LFS;
208 * Some very old servers like DOS and OS/2 used 2 second granularity
209 * (while all current servers use 100ns granularity - see MS-DTYP)
210 * but 1 second is the maximum allowed granularity for the VFS
211 * so for old servers set time granularity to 1 second while for
212 * everything else (current servers) set it to 100ns.
214 if ((tcon->ses->server->vals->protocol_id == SMB10_PROT_ID) &&
215 ((tcon->ses->capabilities &
216 tcon->ses->server->vals->cap_nt_find) == 0) &&
218 sb->s_time_gran = 1000000000; /* 1 second is max allowed gran */
/* DOS-time servers: representable range is SMB_DATE_MIN..SMB_DATE_MAX */
219 ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MIN), 0, 0);
220 sb->s_time_min = ts.tv_sec;
221 ts = cnvrtDosUnixTm(cpu_to_le16(SMB_DATE_MAX),
222 cpu_to_le16(SMB_TIME_MAX), 0);
223 sb->s_time_max = ts.tv_sec;
226 * Almost every server, including all SMB2+, uses DCE TIME
227 * ie 100 nanosecond units, since 1601. See MS-DTYP and MS-FSCC
229 sb->s_time_gran = 100;
230 ts = cifs_NTtimeToUnix(0);
231 sb->s_time_min = ts.tv_sec;
232 ts = cifs_NTtimeToUnix(cpu_to_le64(S64_MAX));
233 sb->s_time_max = ts.tv_sec;
236 sb->s_magic = CIFS_SUPER_MAGIC;
237 sb->s_op = &cifs_super_ops;
238 sb->s_xattr = cifs_xattr_handlers;
239 rc = super_setup_bdi(sb);
242 /* tune readahead according to rsize if readahead size not set on mount */
243 if (cifs_sb->ctx->rsize == 0)
244 cifs_sb->ctx->rsize =
245 tcon->ses->server->ops->negotiate_rsize(tcon, cifs_sb->ctx);
246 if (cifs_sb->ctx->rasize)
247 sb->s_bdi->ra_pages = cifs_sb->ctx->rasize / PAGE_SIZE;
249 sb->s_bdi->ra_pages = cifs_sb->ctx->rsize / PAGE_SIZE;
251 sb->s_blocksize = CIFS_MAX_MSGSIZE;
252 sb->s_blocksize_bits = 14; /* default 2**14 = CIFS_MAX_MSGSIZE */
253 inode = cifs_root_iget(sb);
/* pick case-insensitive dentry ops when the mount requires them */
261 sb->s_d_op = &cifs_ci_dentry_ops;
263 sb->s_d_op = &cifs_dentry_ops;
265 sb->s_root = d_make_root(inode);
271 #ifdef CONFIG_CIFS_NFSD_EXPORT
/* NFS re-export only works with stable server inode numbers */
272 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
273 cifs_dbg(FYI, "export ops supported\n");
274 sb->s_export_op = &cifs_export_ops;
276 #endif /* CONFIG_CIFS_NFSD_EXPORT */
281 cifs_dbg(VFS, "%s: get root inode failed\n", __func__);
285 static void cifs_kill_sb(struct super_block *sb)
287 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
290 * We ned to release all dentries for the cached directories
291 * before we kill the sb.
294 close_all_cached_dirs(cifs_sb);
296 /* finally release root dentry */
298 cifs_sb->root = NULL;
302 cifs_umount(cifs_sb);
/*
 * ->statfs: fill in filesystem limits and usage for statfs(2).
 * NOTE(review): xid acquisition/release and the return path are missing
 * from this extraction.
 */
306 cifs_statfs(struct dentry *dentry, struct kstatfs *buf)
308 struct super_block *sb = dentry->d_sb;
309 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
310 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
311 struct TCP_Server_Info *server = tcon->ses->server;
/* prefer the server-reported path component limit, else PATH_MAX */
317 if (le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength) > 0)
319 le32_to_cpu(tcon->fsAttrInfo.MaxPathNameComponentLength);
321 buf->f_namelen = PATH_MAX;
323 buf->f_fsid.val[0] = tcon->vol_serial_number;
324 /* are using part of create time for more randomness, see man statfs */
325 buf->f_fsid.val[1] = (int)le64_to_cpu(tcon->vol_create_time);
327 buf->f_files = 0; /* undefined */
328 buf->f_ffree = 0; /* unlimited */
/* dialect-specific free-space query when the server ops provide one */
330 if (server->ops->queryfs)
331 rc = server->ops->queryfs(xid, tcon, cifs_sb, buf);
337 static long cifs_fallocate(struct file *file, int mode, loff_t off, loff_t len)
339 struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
340 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
341 struct TCP_Server_Info *server = tcon->ses->server;
343 if (server->ops->fallocate)
344 return server->ops->fallocate(file, tcon, mode, off, len);
349 static int cifs_permission(struct user_namespace *mnt_userns,
350 struct inode *inode, int mask)
352 struct cifs_sb_info *cifs_sb;
354 cifs_sb = CIFS_SB(inode->i_sb);
356 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) {
357 if ((mask & MAY_EXEC) && !execute_ok(inode))
361 } else /* file mode might have been restricted at mount time
362 on the client (above and beyond ACL on servers) for
363 servers which do not support setting and viewing mode bits,
364 so allowing client to check permissions is useful */
365 return generic_permission(&init_user_ns, inode, mask);
/* Slab caches and mempools backing cifs inodes and request/mid buffers. */
368 static struct kmem_cache *cifs_inode_cachep;
369 static struct kmem_cache *cifs_req_cachep;
370 static struct kmem_cache *cifs_mid_cachep;
371 static struct kmem_cache *cifs_sm_req_cachep;
372 mempool_t *cifs_sm_req_poolp;
373 mempool_t *cifs_req_poolp;
374 mempool_t *cifs_mid_poolp;
/*
 * ->alloc_inode: allocate a cifsInodeInfo from the slab and initialize all
 * per-inode cifs state to safe defaults (no oplock, no cached target).
 * NOTE(review): the allocation-failure check is missing from this extraction.
 */
376 static struct inode *
377 cifs_alloc_inode(struct super_block *sb)
379 struct cifsInodeInfo *cifs_inode;
380 cifs_inode = alloc_inode_sb(sb, cifs_inode_cachep, GFP_KERNEL);
383 cifs_inode->cifsAttrs = 0x20; /* default */
384 cifs_inode->time = 0;
386 * Until the file is open and we have gotten oplock info back from the
387 * server, can not assume caching of file data or metadata.
389 cifs_set_oplock_level(cifs_inode, 0);
390 cifs_inode->flags = 0;
391 spin_lock_init(&cifs_inode->writers_lock);
392 cifs_inode->writers = 0;
393 cifs_inode->netfs.inode.i_blkbits = 14; /* 2**14 = CIFS_MAX_MSGSIZE */
394 cifs_inode->server_eof = 0;
395 cifs_inode->uniqueid = 0;
396 cifs_inode->createtime = 0;
397 cifs_inode->epoch = 0;
398 spin_lock_init(&cifs_inode->open_file_lock);
/* fresh lease key per inode for SMB2+ lease requests */
399 generate_random_uuid(cifs_inode->lease_key);
400 cifs_inode->symlink_target = NULL;
403 * Can not set i_flags here - they get immediately overwritten to zero
406 /* cifs_inode->netfs.inode.i_flags = S_NOATIME | S_NOCMTIME; */
407 INIT_LIST_HEAD(&cifs_inode->openFileList);
408 INIT_LIST_HEAD(&cifs_inode->llist);
409 INIT_LIST_HEAD(&cifs_inode->deferred_closes);
410 spin_lock_init(&cifs_inode->deferred_lock);
411 return &cifs_inode->netfs.inode;
415 cifs_free_inode(struct inode *inode)
417 struct cifsInodeInfo *cinode = CIFS_I(inode);
419 if (S_ISLNK(inode->i_mode))
420 kfree(cinode->symlink_target);
421 kmem_cache_free(cifs_inode_cachep, cinode);
425 cifs_evict_inode(struct inode *inode)
427 truncate_inode_pages_final(&inode->i_data);
428 if (inode->i_state & I_PINNING_FSCACHE_WB)
429 cifs_fscache_unuse_inode_cookie(inode, true);
430 cifs_fscache_release_inode_cookie(inode);
435 cifs_show_address(struct seq_file *s, struct TCP_Server_Info *server)
437 struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr;
438 struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *) &server->dstaddr;
440 seq_puts(s, ",addr=");
442 switch (server->dstaddr.ss_family) {
444 seq_printf(s, "%pI4", &sa->sin_addr.s_addr);
447 seq_printf(s, "%pI6", &sa6->sin6_addr.s6_addr);
448 if (sa6->sin6_scope_id)
449 seq_printf(s, "%%%u", sa6->sin6_scope_id);
452 seq_puts(s, "(unknown)");
455 seq_puts(s, ",rdma");
459 cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
461 if (ses->sectype == Unspecified) {
462 if (ses->user_name == NULL)
463 seq_puts(s, ",sec=none");
467 seq_puts(s, ",sec=");
469 switch (ses->sectype) {
471 seq_puts(s, "ntlmv2");
477 seq_puts(s, "ntlmssp");
480 /* shouldn't ever happen */
481 seq_puts(s, "unknown");
488 if (ses->sectype == Kerberos)
489 seq_printf(s, ",cruid=%u",
490 from_kuid_munged(&init_user_ns, ses->cred_uid));
494 cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
496 seq_puts(s, ",cache=");
498 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
499 seq_puts(s, "strict");
500 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
502 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RW_CACHE)
503 seq_puts(s, "singleclient"); /* assume only one client access */
504 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE)
505 seq_puts(s, "ro"); /* read only caching assumed */
507 seq_puts(s, "loose");
511 * cifs_show_devname() is used so we show the mount device name with correct
512 * format (e.g. forward slashes vs. back slashes) in /proc/mounts
514 static int cifs_show_devname(struct seq_file *m, struct dentry *root)
516 struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
517 char *devname = kstrdup(cifs_sb->ctx->source, GFP_KERNEL);
522 convert_delimiter(devname, '/');
523 /* escape all spaces in share names */
524 seq_escape(m, devname, " \t");
531 * cifs_show_options() is for displaying mount options in /proc/mounts.
532 * Not all settable options are displayed but most of the important
/*
 * NOTE(review): this extraction is missing some lines (several seq_puts
 * arguments and a few condition lines); the flag-to-option mapping below
 * reflects only the visible code.
 */
536 cifs_show_options(struct seq_file *s, struct dentry *root)
538 struct cifs_sb_info *cifs_sb = CIFS_SB(root->d_sb);
539 struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
540 struct sockaddr *srcaddr;
541 srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
/* protocol version, security mechanism and cache mode first */
543 seq_show_option(s, "vers", tcon->ses->server->vals->version_string);
544 cifs_show_security(s, tcon->ses);
545 cifs_show_cache_flavor(s, cifs_sb);
548 seq_puts(s, ",nolease");
549 if (cifs_sb->ctx->multiuser)
550 seq_puts(s, ",multiuser");
551 else if (tcon->ses->user_name)
552 seq_show_option(s, "username", tcon->ses->user_name);
554 if (tcon->ses->domainName && tcon->ses->domainName[0] != 0)
555 seq_show_option(s, "domain", tcon->ses->domainName);
/* client-side source address, if one was bound at mount time */
557 if (srcaddr->sa_family != AF_UNSPEC) {
558 struct sockaddr_in *saddr4;
559 struct sockaddr_in6 *saddr6;
560 saddr4 = (struct sockaddr_in *)srcaddr;
561 saddr6 = (struct sockaddr_in6 *)srcaddr;
562 if (srcaddr->sa_family == AF_INET6)
563 seq_printf(s, ",srcaddr=%pI6c",
565 else if (srcaddr->sa_family == AF_INET)
566 seq_printf(s, ",srcaddr=%pI4",
567 &saddr4->sin_addr.s_addr);
569 seq_printf(s, ",srcaddr=BAD-AF:%i",
570 (int)(srcaddr->sa_family));
573 seq_printf(s, ",uid=%u",
574 from_kuid_munged(&init_user_ns, cifs_sb->ctx->linux_uid));
575 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_UID)
576 seq_puts(s, ",forceuid");
578 seq_puts(s, ",noforceuid");
580 seq_printf(s, ",gid=%u",
581 from_kgid_munged(&init_user_ns, cifs_sb->ctx->linux_gid));
582 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID)
583 seq_puts(s, ",forcegid");
585 seq_puts(s, ",noforcegid");
587 cifs_show_address(s, tcon->ses->server);
590 seq_printf(s, ",file_mode=0%ho,dir_mode=0%ho",
591 cifs_sb->ctx->file_mode,
592 cifs_sb->ctx->dir_mode);
593 if (cifs_sb->ctx->iocharset)
594 seq_printf(s, ",iocharset=%s", cifs_sb->ctx->iocharset);
596 seq_puts(s, ",seal");
597 else if (tcon->ses->server->ignore_signature)
598 seq_puts(s, ",signloosely");
600 seq_puts(s, ",nocase");
602 seq_puts(s, ",nodelete");
603 if (cifs_sb->ctx->no_sparse)
604 seq_puts(s, ",nosparse");
605 if (tcon->local_lease)
606 seq_puts(s, ",locallease");
608 seq_puts(s, ",hard");
610 seq_puts(s, ",soft");
611 if (tcon->use_persistent)
612 seq_puts(s, ",persistenthandles");
613 else if (tcon->use_resilient)
614 seq_puts(s, ",resilienthandles");
615 if (tcon->posix_extensions)
616 seq_puts(s, ",posix");
617 else if (tcon->unix_ext)
618 seq_puts(s, ",unix");
620 seq_puts(s, ",nounix");
/* one option per mnt_cifs_flags bit from here down */
621 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
622 seq_puts(s, ",nodfs");
623 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
624 seq_puts(s, ",posixpaths");
625 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
626 seq_puts(s, ",setuids");
627 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UID_FROM_ACL)
628 seq_puts(s, ",idsfromsid");
629 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
630 seq_puts(s, ",serverino");
631 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
632 seq_puts(s, ",rwpidforward");
633 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
634 seq_puts(s, ",forcemand");
635 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
636 seq_puts(s, ",nouser_xattr");
637 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
638 seq_puts(s, ",mapchars");
639 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
640 seq_puts(s, ",mapposix");
641 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
643 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
644 seq_puts(s, ",nobrl");
645 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_HANDLE_CACHE)
646 seq_puts(s, ",nohandlecache");
647 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MODE_FROM_SID)
648 seq_puts(s, ",modefromsid");
649 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
650 seq_puts(s, ",cifsacl");
651 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
652 seq_puts(s, ",dynperm");
653 if (root->d_sb->s_flags & SB_POSIXACL)
655 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS)
656 seq_puts(s, ",mfsymlinks");
657 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_FSCACHE)
659 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)
660 seq_puts(s, ",nostrictsync");
661 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
662 seq_puts(s, ",noperm");
663 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
664 seq_printf(s, ",backupuid=%u",
665 from_kuid_munged(&init_user_ns,
666 cifs_sb->ctx->backupuid));
667 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
668 seq_printf(s, ",backupgid=%u",
669 from_kgid_munged(&init_user_ns,
670 cifs_sb->ctx->backupgid));
/* numeric sizing/timing options */
672 seq_printf(s, ",rsize=%u", cifs_sb->ctx->rsize);
673 seq_printf(s, ",wsize=%u", cifs_sb->ctx->wsize);
674 seq_printf(s, ",bsize=%u", cifs_sb->ctx->bsize);
675 if (cifs_sb->ctx->rasize)
676 seq_printf(s, ",rasize=%u", cifs_sb->ctx->rasize);
677 if (tcon->ses->server->min_offload)
678 seq_printf(s, ",esize=%u", tcon->ses->server->min_offload);
679 seq_printf(s, ",echo_interval=%lu",
680 tcon->ses->server->echo_interval / HZ);
682 /* Only display the following if overridden on mount */
683 if (tcon->ses->server->max_credits != SMB2_MAX_CREDITS_AVAILABLE)
684 seq_printf(s, ",max_credits=%u", tcon->ses->server->max_credits);
685 if (tcon->ses->server->tcp_nodelay)
686 seq_puts(s, ",tcpnodelay");
687 if (tcon->ses->server->noautotune)
688 seq_puts(s, ",noautotune");
689 if (tcon->ses->server->noblocksnd)
690 seq_puts(s, ",noblocksend");
692 if (tcon->snapshot_time)
693 seq_printf(s, ",snapshot=%llu", tcon->snapshot_time);
694 if (tcon->handle_timeout)
695 seq_printf(s, ",handletimeout=%u", tcon->handle_timeout);
698 * Display file and directory attribute timeout in seconds.
699 * If file and directory attribute timeout the same then actimeo
700 * was likely specified on mount
702 if (cifs_sb->ctx->acdirmax == cifs_sb->ctx->acregmax)
703 seq_printf(s, ",actimeo=%lu", cifs_sb->ctx->acregmax / HZ);
705 seq_printf(s, ",acdirmax=%lu", cifs_sb->ctx->acdirmax / HZ);
706 seq_printf(s, ",acregmax=%lu", cifs_sb->ctx->acregmax / HZ);
708 seq_printf(s, ",closetimeo=%lu", cifs_sb->ctx->closetimeo / HZ);
710 if (tcon->ses->chan_max > 1)
711 seq_printf(s, ",multichannel,max_channels=%zu",
712 tcon->ses->chan_max);
714 if (tcon->use_witness)
715 seq_puts(s, ",witness");
/*
 * ->umount_begin (umount -f): close deferred file handles and wake all
 * tasks blocked on this tcon's server queues so a forced unmount can make
 * progress even with requests outstanding.
 */
720 static void cifs_umount_begin(struct super_block *sb)
722 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
723 struct cifs_tcon *tcon;
728 tcon = cifs_sb_master_tcon(cifs_sb);
/* tc_lock nests inside cifs_tcp_ses_lock */
730 spin_lock(&cifs_tcp_ses_lock);
731 spin_lock(&tcon->tc_lock);
732 if ((tcon->tc_count > 1) || (tcon->status == TID_EXITING)) {
733 /* we have other mounts to same share or we have
734 already tried to umount this and woken up
735 all waiting network requests, nothing to do */
736 spin_unlock(&tcon->tc_lock);
737 spin_unlock(&cifs_tcp_ses_lock);
741 * can not set tcon->status to TID_EXITING yet since we don't know if umount -f will
742 * fail later (e.g. due to open files). TID_EXITING will be set just before tdis req sent
744 spin_unlock(&tcon->tc_lock);
745 spin_unlock(&cifs_tcp_ses_lock);
747 cifs_close_all_deferred_files(tcon);
748 /* cancel_brl_requests(tcon); */ /* BB mark all brl mids as exiting */
749 /* cancel_notify_requests(tcon); */
750 if (tcon->ses && tcon->ses->server) {
751 cifs_dbg(FYI, "wake up tasks now - umount begin not complete\n");
752 wake_up_all(&tcon->ses->server->request_q);
753 wake_up_all(&tcon->ses->server->response_q);
754 msleep(1); /* yield */
755 /* we have to kick the requests once more */
756 wake_up_all(&tcon->ses->server->response_q);
763 static int cifs_freeze(struct super_block *sb)
765 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
766 struct cifs_tcon *tcon;
771 tcon = cifs_sb_master_tcon(cifs_sb);
773 cifs_close_all_deferred_files(tcon);
#ifdef CONFIG_CIFS_STATS2
/* ->show_stats: placeholder; per-sb stats not implemented yet (BB). */
static int cifs_show_stats(struct seq_file *s, struct dentry *root)
{
	/* BB FIXME */
	return 0;
}
#endif
/*
 * ->write_inode: nothing to write back server-side here; just release the
 * fscache writeback pin taken for this writeback cycle.
 */
static int cifs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	fscache_unpin_writeback(wbc, cifs_inode_cookie(inode));
	return 0;
}
791 static int cifs_drop_inode(struct inode *inode)
793 struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
795 /* no serverino => unconditional eviction */
796 return !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) ||
797 generic_drop_inode(inode);
/* Superblock operations installed into sb->s_op by cifs_read_super(). */
800 static const struct super_operations cifs_super_ops = {
801 .statfs = cifs_statfs,
802 .alloc_inode = cifs_alloc_inode,
803 .write_inode = cifs_write_inode,
804 .free_inode = cifs_free_inode,
805 .drop_inode = cifs_drop_inode,
806 .evict_inode = cifs_evict_inode,
807 /* .show_path = cifs_show_path, */ /* Would we ever need show path? */
808 .show_devname = cifs_show_devname,
809 /* .delete_inode = cifs_delete_inode, */ /* Do not need above
810 function unless later we add lazy close of inodes or unless the
811 kernel forgets to call us with the same number of releases (closes)
813 .show_options = cifs_show_options,
814 .umount_begin = cifs_umount_begin,
815 .freeze_fs = cifs_freeze,
816 #ifdef CONFIG_CIFS_STATS2
817 .show_stats = cifs_show_stats,
822 * Get root dentry from superblock according to prefix path mount option.
823 * Return dentry with refcount + 1 on success and NULL otherwise.
/*
 * NOTE(review): the component-walk loop body is only partially visible in
 * this extraction (separator skipping and loop advance lines are missing).
 */
825 static struct dentry *
826 cifs_get_root(struct smb3_fs_context *ctx, struct super_block *sb)
828 struct dentry *dentry;
829 struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
830 char *full_path = NULL;
/* with USE_PREFIX_PATH the prefix is already baked into the tcon path */
834 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH)
835 return dget(sb->s_root);
837 full_path = cifs_build_path_to_root(ctx, cifs_sb,
838 cifs_sb_master_tcon(cifs_sb), 0);
839 if (full_path == NULL)
840 return ERR_PTR(-ENOMEM);
842 cifs_dbg(FYI, "Get root dentry for %s\n", full_path);
844 sep = CIFS_DIR_SEP(cifs_sb);
845 dentry = dget(sb->s_root);
849 struct inode *dir = d_inode(dentry);
850 struct dentry *child;
/* each intermediate component must be a directory */
852 if (!S_ISDIR(dir->i_mode)) {
854 dentry = ERR_PTR(-ENOTDIR);
858 /* skip separators */
/* scan to the end of the current path component */
865 while (*s && *s != sep)
868 child = lookup_positive_unlocked(p, dentry, s - p);
871 } while (!IS_ERR(dentry));
876 static int cifs_set_super(struct super_block *sb, void *data)
878 struct cifs_mnt_data *mnt_data = data;
879 sb->s_fs_info = mnt_data->cifs_sb;
880 return set_anon_super(sb, NULL);
/*
 * Common mount entry point shared by the "cifs" and "smb3" filesystem
 * types: duplicate the fs context, mount the share, then find or create a
 * matching superblock via sget().
 * NOTE(review): goto error labels and several rc checks are missing from
 * this extraction.
 */
884 cifs_smb3_do_mount(struct file_system_type *fs_type,
885 int flags, struct smb3_fs_context *old_ctx)
887 struct cifs_mnt_data mnt_data;
888 struct cifs_sb_info *cifs_sb;
889 struct super_block *sb;
894 * Prints in Kernel / CIFS log the attempted mount operation
895 * If CIFS_DEBUG && cifs_FYI
898 cifs_dbg(FYI, "Devname: %s flags: %d\n", old_ctx->UNC, flags);
900 cifs_info("Attempting to mount %s\n", old_ctx->UNC);
902 cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL);
904 return ERR_PTR(-ENOMEM);
906 cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
908 root = ERR_PTR(-ENOMEM);
911 rc = smb3_fs_context_dup(cifs_sb->ctx, old_ctx);
917 rc = cifs_setup_volume_info(cifs_sb->ctx, NULL, NULL);
923 rc = cifs_setup_cifs_sb(cifs_sb);
929 rc = cifs_mount(cifs_sb, cifs_sb->ctx);
931 if (!(flags & SB_SILENT))
932 cifs_dbg(VFS, "cifs_mount failed w/return code = %d\n",
938 mnt_data.ctx = cifs_sb->ctx;
939 mnt_data.cifs_sb = cifs_sb;
940 mnt_data.flags = flags;
942 /* BB should we make this contingent on mount parm? */
943 flags |= SB_NODIRATIME | SB_NOATIME;
/* sget() either reuses a matching existing sb or adopts this new one */
945 sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data);
947 cifs_umount(cifs_sb);
/* reused sb: this mount's connection state is redundant, drop it */
952 cifs_dbg(FYI, "Use existing superblock\n");
953 cifs_umount(cifs_sb);
956 rc = cifs_read_super(sb);
962 sb->s_flags |= SB_ACTIVE;
965 root = cifs_get_root(cifs_sb ? cifs_sb->ctx : old_ctx, sb);
970 cifs_sb->root = dget(root);
972 cifs_dbg(FYI, "dentry root is: %p\n", root);
976 deactivate_locked_super(sb);
979 kfree(cifs_sb->prepath);
980 smb3_cleanup_fs_context(cifs_sb->ctx);
987 cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
990 struct inode *inode = file_inode(iocb->ki_filp);
992 if (iocb->ki_flags & IOCB_DIRECT)
993 return cifs_user_readv(iocb, iter);
995 rc = cifs_revalidate_mapping(inode);
999 return generic_file_read_iter(iocb, iter);
/*
 * ->write_iter for cached files: O_DIRECT writes go straight to the
 * server; cached writes go through the page cache while holding a writer
 * reference on the cifs inode.
 * NOTE(review): some branch/return lines are missing from this extraction.
 */
1002 static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
1004 struct inode *inode = file_inode(iocb->ki_filp);
1005 struct cifsInodeInfo *cinode = CIFS_I(inode);
1009 if (iocb->ki_filp->f_flags & O_DIRECT) {
1010 written = cifs_user_writev(iocb, from);
/* a successful direct write invalidates any read-cached pages */
1011 if (written > 0 && CIFS_CACHE_READ(cinode)) {
1012 cifs_zap_mapping(inode);
1014 "Set no oplock for inode=%p after a write operation\n",
1021 written = cifs_get_writer(cinode);
1025 written = generic_file_write_iter(iocb, from);
/* without a write-cache grant, push dirty pages out immediately */
1027 if (CIFS_CACHE_WRITE(CIFS_I(inode)))
1030 rc = filemap_fdatawrite(inode->i_mapping);
1032 cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
1036 cifs_put_writer(cinode);
/*
 * ->llseek: SEEK_END/SEEK_DATA/SEEK_HOLE need an up-to-date file size, so
 * flush and revalidate before seeking; dialects with a native llseek
 * (sparse-file aware) get first refusal.
 */
1040 static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
1042 struct cifsFileInfo *cfile = file->private_data;
1043 struct cifs_tcon *tcon;
1046 * whence == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
1047 * the cached file length
1049 if (whence != SEEK_SET && whence != SEEK_CUR) {
1051 struct inode *inode = file_inode(file);
1054 * We need to be sure that all dirty pages are written and the
1055 * server has the newest file length.
1057 if (!CIFS_CACHE_READ(CIFS_I(inode)) && inode->i_mapping &&
1058 inode->i_mapping->nrpages != 0) {
1059 rc = filemap_fdatawait(inode->i_mapping);
1061 mapping_set_error(inode->i_mapping, rc);
1066 * Some applications poll for the file length in this strange
1067 * way so we must seek to end on non-oplocked files by
1068 * setting the revalidate time to zero.
1070 CIFS_I(inode)->time = 0;
1072 rc = cifs_revalidate_file_attr(file);
/* prefer the dialect-specific llseek when the server ops provide one */
1076 if (cfile && cfile->tlink) {
1077 tcon = tlink_tcon(cfile->tlink);
1078 if (tcon->ses->server->ops->llseek)
1079 return tcon->ses->server->ops->llseek(file, tcon,
1082 return generic_file_llseek(file, offset, whence);
/*
 * ->setlease: only grant a local lease when the matching oplock/lease is
 * already cached from the server (or the "locallease" mount option opts
 * out of server coordination for read leases).
 * NOTE(review): the final deny path is missing from this extraction.
 */
1086 cifs_setlease(struct file *file, long arg, struct file_lock **lease, void **priv)
1089 * Note that this is called by vfs setlease with i_lock held to
1090 * protect *lease from going away.
1092 struct inode *inode = file_inode(file);
1093 struct cifsFileInfo *cfile = file->private_data;
/* leases only make sense on regular files */
1095 if (!(S_ISREG(inode->i_mode)))
1098 /* Check if file is oplocked if this is request for new lease */
1099 if (arg == F_UNLCK ||
1100 ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) ||
1101 ((arg == F_WRLCK) && CIFS_CACHE_WRITE(CIFS_I(inode))))
1102 return generic_setlease(file, arg, lease, priv);
1103 else if (tlink_tcon(cfile->tlink)->local_lease &&
1104 !CIFS_CACHE_READ(CIFS_I(inode)))
1106 * If the server claims to support oplock on this file, then we
1107 * still need to check oplock even if the local_lease mount
1108 * option is set, but there are servers which do not support
1109 * oplock for which this mount option may be useful if the user
1110 * knows that the file won't be changed on the server by anyone
1113 return generic_setlease(file, arg, lease, priv);
1118 struct file_system_type cifs_fs_type = {
1119 .owner = THIS_MODULE,
1121 .init_fs_context = smb3_init_fs_context,
1122 .parameters = smb3_fs_parameters,
1123 .kill_sb = cifs_kill_sb,
1124 .fs_flags = FS_RENAME_DOES_D_MOVE,
1126 MODULE_ALIAS_FS("cifs");
1128 struct file_system_type smb3_fs_type = {
1129 .owner = THIS_MODULE,
1131 .init_fs_context = smb3_init_fs_context,
1132 .parameters = smb3_fs_parameters,
1133 .kill_sb = cifs_kill_sb,
1134 .fs_flags = FS_RENAME_DOES_D_MOVE,
1136 MODULE_ALIAS_FS("smb3");
1137 MODULE_ALIAS("smb3");
/* Inode operations for directories. */
1139 const struct inode_operations cifs_dir_inode_ops = {
1140 .create = cifs_create,
1141 .atomic_open = cifs_atomic_open,
1142 .lookup = cifs_lookup,
1143 .getattr = cifs_getattr,
1144 .unlink = cifs_unlink,
1145 .link = cifs_hardlink,
1146 .mkdir = cifs_mkdir,
1147 .rmdir = cifs_rmdir,
1148 .rename = cifs_rename2,
1149 .permission = cifs_permission,
1150 .setattr = cifs_setattr,
1151 .symlink = cifs_symlink,
1152 .mknod = cifs_mknod,
1153 .listxattr = cifs_listxattr,
/* Inode operations for regular files. */
1156 const struct inode_operations cifs_file_inode_ops = {
1157 .setattr = cifs_setattr,
1158 .getattr = cifs_getattr,
1159 .permission = cifs_permission,
1160 .listxattr = cifs_listxattr,
1161 .fiemap = cifs_fiemap,
/*
 * ->get_link: return a kmalloc'd copy of the cached symlink target under
 * i_lock, or ERR_PTR(-EOPNOTSUPP) when no target is cached; the copy is
 * freed via the delayed call (kfree_link).
 */
1164 const char *cifs_get_link(struct dentry *dentry, struct inode *inode,
1165 struct delayed_call *done)
1169 target_path = kmalloc(PATH_MAX, GFP_KERNEL)
1171 return ERR_PTR(-ENOMEM);
1173 spin_lock(&inode->i_lock);
1174 if (likely(CIFS_I(inode)->symlink_target)) {
1175 strscpy(target_path, CIFS_I(inode)->symlink_target, PATH_MAX);
/* NOTE(review): missing else-branch lines presumably free target_path
 * before the error pointer is assigned - confirm against full source */
1178 target_path = ERR_PTR(-EOPNOTSUPP);
1180 spin_unlock(&inode->i_lock);
1182 if (!IS_ERR(target_path))
1183 set_delayed_call(done, kfree_link, target_path);
/* Inode operations for symlinks. */
1188 const struct inode_operations cifs_symlink_inode_ops = {
1189 .get_link = cifs_get_link,
1190 .setattr = cifs_setattr,
1191 .permission = cifs_permission,
1192 .listxattr = cifs_listxattr,
1196 * Advance the EOF marker to after the source range.
/*
 * Used before a server-side copy so the source EOF covers the range being
 * copied; falls back (per the visible calls) to resizing the netfs/fscache
 * view and flushing the source mapping.
 * NOTE(review): the rc checks between the visible calls are missing from
 * this extraction.
 */
1198 static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *src_cifsi,
1199 struct cifs_tcon *src_tcon,
1200 unsigned int xid, loff_t src_end)
1202 struct cifsFileInfo *writeable_srcfile;
1205 writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY);
1206 if (writeable_srcfile) {
1207 if (src_tcon->ses->server->ops->set_file_size)
1208 rc = src_tcon->ses->server->ops->set_file_size(
1209 xid, src_tcon, writeable_srcfile,
1210 src_inode->i_size, true /* no need to set sparse */);
1213 cifsFileInfo_put(writeable_srcfile);
1214 cifs_dbg(FYI, "SetFSize for copychunk rc = %d\n", rc);
/* keep the local cache's notion of the file size in sync */
1220 netfs_resize_file(&src_cifsi->netfs, src_end);
1221 fscache_resize_cookie(cifs_inode_cookie(src_inode), src_end);
1225 return filemap_write_and_wait(src_inode->i_mapping);
1229 * Flush out either the folio that overlaps the beginning of a range in which
1230 * pos resides or the folio that overlaps the end of a range unless that folio
1231 * is entirely within the range we're going to invalidate. We extend the flush
1232 * bounds to encompass the folio.
/* 'first' selects which edge of the range pos represents; *_fstart/*_fend
 * are in/out and grow to cover the whole folio that was found. */
1234 static int cifs_flush_folio(struct inode *inode, loff_t pos, loff_t *_fstart, loff_t *_fend,
1237 struct folio *folio;
1238 unsigned long long fpos, fend;
1239 pgoff_t index = pos / PAGE_SIZE;
1243 folio = filemap_get_folio(inode->i_mapping, index);
1247 size = folio_size(folio);
1248 fpos = folio_pos(folio);
1249 fend = fpos + size - 1;
/* Widen the caller's invalidation bounds to full-folio granularity. */
1250 *_fstart = min_t(unsigned long long, *_fstart, fpos);
1251 *_fend = max_t(unsigned long long, *_fend, fend);
/* If the range edge coincides exactly with the folio edge, the folio lies
 * entirely inside the range to be invalidated — no flush needed. */
1252 if ((first && pos == fpos) || (!first && pos == fend))
1255 rc = filemap_write_and_wait_range(inode->i_mapping, fpos, fend);
/*
 * cifs_remap_file_range - ->remap_file_range handler (reflink/clone).
 *
 * Implements server-side range cloning via the dialect's duplicate_extents
 * op.  Rejects REMAP_FILE_DEDUP; flushes source and the destination edge
 * folios, invalidates the destination pagecache/fscache region, then asks
 * the server to duplicate the extents.  Returns the cloned length on
 * success or a negative errno.
 */
1261 static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
1262 struct file *dst_file, loff_t destoff, loff_t len,
1263 unsigned int remap_flags)
1265 struct inode *src_inode = file_inode(src_file);
1266 struct inode *target_inode = file_inode(dst_file);
1267 struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
1268 struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
1269 struct cifsFileInfo *smb_file_src = src_file->private_data;
1270 struct cifsFileInfo *smb_file_target = dst_file->private_data;
1271 struct cifs_tcon *target_tcon, *src_tcon;
1272 unsigned long long destend, fstart, fend, new_size;
/* Dedup (content-compare) is not supported; only clone/advisory remap. */
1276 if (remap_flags & REMAP_FILE_DEDUP)
1278 if (remap_flags & ~REMAP_FILE_ADVISORY)
1281 cifs_dbg(FYI, "clone range\n");
/* Both files must have been opened through cifs (have cifsFileInfo). */
1285 if (!smb_file_src || !smb_file_target) {
1287 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1291 src_tcon = tlink_tcon(smb_file_src->tlink);
1292 target_tcon = tlink_tcon(smb_file_target->tlink);
1295 * Note: cifs case is easier than btrfs since server responsible for
1296 * checks for proper open modes and file type and if it wants
1297 * server could even support copy of range where source = target
1299 lock_two_nondirectories(target_inode, src_inode);
/* len == 0 means "to EOF" per the remap_file_range contract. */
1302 len = src_inode->i_size - off;
1304 cifs_dbg(FYI, "clone range\n");
1306 /* Flush the source buffer */
1307 rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
1312 /* The server-side copy will fail if the source crosses the EOF marker.
1313 * Advance the EOF marker after the flush above to the end of the range
1314 * if it's short of that.
1316 if (src_cifsi->netfs.remote_i_size < off + len) {
1317 rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
1322 new_size = destoff + len;
1323 destend = destoff + len - 1;
1325 /* Flush the folios at either end of the destination range to prevent
1326 * accidental loss of dirty data outside of the range.
1331 rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
1334 rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
1338 /* Discard all the folios that overlap the destination region. */
1339 cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
1340 truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
1342 fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
1343 i_size_read(target_inode), 0);
/* Ask the server to clone the extents; grow the local size if extended. */
1346 if (target_tcon->ses->server->ops->duplicate_extents) {
1347 rc = target_tcon->ses->server->ops->duplicate_extents(xid,
1348 smb_file_src, smb_file_target, off, len, destoff);
1349 if (rc == 0 && new_size > i_size_read(target_inode)) {
1350 truncate_setsize(target_inode, new_size);
1351 netfs_resize_file(&target_cifsi->netfs, new_size);
1352 fscache_resize_cookie(cifs_inode_cookie(target_inode),
1357 /* force revalidate of size and timestamps of target file now
1358 that target is updated on the server */
1359 CIFS_I(target_inode)->time = 0;
1361 /* although unlocking in the reverse order from locking is not
1362 strictly necessary here it is a little cleaner to be consistent */
1363 unlock_two_nondirectories(src_inode, target_inode);
1366 return rc < 0 ? rc : len;
/*
 * cifs_file_copychunk_range - server-side byte-range copy (SMB2 copychunk).
 *
 * Requires both handles to live on the same SMB session and the dialect to
 * provide a copychunk_range op.  Flushes the source, advances the remote
 * EOF if the range extends past it, invalidates the overlapping destination
 * pagecache, then issues the copy.  Returns bytes copied or negative errno.
 */
1369 ssize_t cifs_file_copychunk_range(unsigned int xid,
1370 struct file *src_file, loff_t off,
1371 struct file *dst_file, loff_t destoff,
1372 size_t len, unsigned int flags)
1374 struct inode *src_inode = file_inode(src_file);
1375 struct inode *target_inode = file_inode(dst_file);
1376 struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
1377 struct cifsFileInfo *smb_file_src;
1378 struct cifsFileInfo *smb_file_target;
1379 struct cifs_tcon *src_tcon;
1380 struct cifs_tcon *target_tcon;
1381 unsigned long long destend, fstart, fend;
1384 cifs_dbg(FYI, "copychunk range\n");
1386 if (!src_file->private_data || !dst_file->private_data) {
1388 cifs_dbg(VFS, "missing cifsFileInfo on copy range src file\n");
1393 smb_file_target = dst_file->private_data;
1394 smb_file_src = src_file->private_data;
1395 src_tcon = tlink_tcon(smb_file_src->tlink);
1396 target_tcon = tlink_tcon(smb_file_target->tlink);
/* copychunk only works within a single server session. */
1398 if (src_tcon->ses != target_tcon->ses) {
1399 cifs_dbg(VFS, "source and target of copy not on same server\n");
1404 if (!target_tcon->ses->server->ops->copychunk_range)
1408 * Note: cifs case is easier than btrfs since server responsible for
1409 * checks for proper open modes and file type and if it wants
1410 * server could even support copy of range where source = target
1412 lock_two_nondirectories(target_inode, src_inode);
1414 cifs_dbg(FYI, "about to flush pages\n");
1416 rc = filemap_write_and_wait_range(src_inode->i_mapping, off,
1421 /* The server-side copy will fail if the source crosses the EOF marker.
1422 * Advance the EOF marker after the flush above to the end of the range
1423 * if it's short of that.
/* NOTE(review): this path tests src_cifsi->server_eof while the clone
 * path above tests src_cifsi->netfs.remote_i_size — confirm both fields
 * track the same remote EOF in this tree. */
1425 if (src_cifsi->server_eof < off + len) {
1426 rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
1431 destend = destoff + len - 1;
1433 /* Flush the folios at either end of the destination range to prevent
1434 * accidental loss of dirty data outside of the range.
1439 rc = cifs_flush_folio(target_inode, destoff, &fstart, &fend, true);
1442 rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
1446 /* Discard all the folios that overlap the destination region. */
1447 truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
/* Update mtime/remove setuid bits before modifying the destination. */
1449 rc = file_modified(dst_file);
1451 rc = target_tcon->ses->server->ops->copychunk_range(xid,
1452 smb_file_src, smb_file_target, off, len, destoff);
/* Grow the cached size if the copy extended the file. */
1453 if (rc > 0 && destoff + rc > i_size_read(target_inode))
1454 truncate_setsize(target_inode, destoff + rc);
1457 file_accessed(src_file);
1459 /* force revalidate of size and timestamps of target file now
1460 * that target is updated on the server
1462 CIFS_I(target_inode)->time = 0;
1465 /* although unlocking in the reverse order from locking is not
1466 * strictly necessary here it is a little cleaner to be consistent
1468 unlock_two_nondirectories(src_inode, target_inode);
1475 * Directory operations under CIFS/SMB2/SMB3 are synchronous, so fsync()
1476 * is a dummy operation.
/* Only logs the request; there is nothing to flush for directories. */
1478 static int cifs_dir_fsync(struct file *file, loff_t start, loff_t end, int datasync)
1480 cifs_dbg(FYI, "Sync directory - name: %pD datasync: 0x%x\n",
/*
 * cifs_copy_file_range - ->copy_file_range handler.
 *
 * Tries the server-side copychunk first; if the server/dialect does not
 * support it (-EOPNOTSUPP) or the files span filesystems (-EXDEV), falls
 * back to the generic read/write copy loop.
 */
1486 static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
1487 struct file *dst_file, loff_t destoff,
1488 size_t len, unsigned int flags)
1490 unsigned int xid = get_xid();
1492 struct cifsFileInfo *cfile = dst_file->private_data;
/* Refuse to copy into a file being used as swap. */
1494 if (cfile->swapfile) {
1500 rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
/* Fall back to a client-side copy when server-side copy is unavailable. */
1504 if (rc == -EOPNOTSUPP || rc == -EXDEV)
1505 rc = generic_copy_file_range(src_file, off, dst_file,
1506 destoff, len, flags);
/* Default file ops: cached ("loose") reads, byte-range lock support. */
1510 const struct file_operations cifs_file_ops = {
1511 .read_iter = cifs_loose_read_iter,
1512 .write_iter = cifs_file_write_iter,
1514 .release = cifs_close,
1516 .flock = cifs_flock,
1517 .fsync = cifs_fsync,
1518 .flush = cifs_flush,
1519 .mmap = cifs_file_mmap,
1520 .splice_read = generic_file_splice_read,
1521 .splice_write = iter_file_splice_write,
1522 .llseek = cifs_llseek,
1523 .unlocked_ioctl = cifs_ioctl,
1524 .copy_file_range = cifs_copy_file_range,
1525 .remap_file_range = cifs_remap_file_range,
1526 .setlease = cifs_setlease,
1527 .fallocate = cifs_fallocate,
/* "strictcache" mount variant: strict cache-coherent read/write/fsync/mmap. */
1530 const struct file_operations cifs_file_strict_ops = {
1531 .read_iter = cifs_strict_readv,
1532 .write_iter = cifs_strict_writev,
1534 .release = cifs_close,
1536 .flock = cifs_flock,
1537 .fsync = cifs_strict_fsync,
1538 .flush = cifs_flush,
1539 .mmap = cifs_file_strict_mmap,
1540 .splice_read = generic_file_splice_read,
1541 .splice_write = iter_file_splice_write,
1542 .llseek = cifs_llseek,
1543 .unlocked_ioctl = cifs_ioctl,
1544 .copy_file_range = cifs_copy_file_range,
1545 .remap_file_range = cifs_remap_file_range,
1546 .setlease = cifs_setlease,
1547 .fallocate = cifs_fallocate,
/* "directio" mount variant: uncached direct reads/writes. */
1550 const struct file_operations cifs_file_direct_ops = {
1551 .read_iter = cifs_direct_readv,
1552 .write_iter = cifs_direct_writev,
1554 .release = cifs_close,
1556 .flock = cifs_flock,
1557 .fsync = cifs_fsync,
1558 .flush = cifs_flush,
1559 .mmap = cifs_file_mmap,
1560 .splice_read = generic_file_splice_read,
1561 .splice_write = iter_file_splice_write,
1562 .unlocked_ioctl = cifs_ioctl,
1563 .copy_file_range = cifs_copy_file_range,
1564 .remap_file_range = cifs_remap_file_range,
1565 .llseek = cifs_llseek,
1566 .setlease = cifs_setlease,
1567 .fallocate = cifs_fallocate,
/* As cifs_file_ops but with byte-range locking (.lock/.flock) omitted,
 * for "nobrl" mounts. */
1570 const struct file_operations cifs_file_nobrl_ops = {
1571 .read_iter = cifs_loose_read_iter,
1572 .write_iter = cifs_file_write_iter,
1574 .release = cifs_close,
1575 .fsync = cifs_fsync,
1576 .flush = cifs_flush,
1577 .mmap = cifs_file_mmap,
1578 .splice_read = generic_file_splice_read,
1579 .splice_write = iter_file_splice_write,
1580 .llseek = cifs_llseek,
1581 .unlocked_ioctl = cifs_ioctl,
1582 .copy_file_range = cifs_copy_file_range,
1583 .remap_file_range = cifs_remap_file_range,
1584 .setlease = cifs_setlease,
1585 .fallocate = cifs_fallocate,
/* Strict-cache variant without byte-range locking ("strictcache,nobrl"). */
1588 const struct file_operations cifs_file_strict_nobrl_ops = {
1589 .read_iter = cifs_strict_readv,
1590 .write_iter = cifs_strict_writev,
1592 .release = cifs_close,
1593 .fsync = cifs_strict_fsync,
1594 .flush = cifs_flush,
1595 .mmap = cifs_file_strict_mmap,
1596 .splice_read = generic_file_splice_read,
1597 .splice_write = iter_file_splice_write,
1598 .llseek = cifs_llseek,
1599 .unlocked_ioctl = cifs_ioctl,
1600 .copy_file_range = cifs_copy_file_range,
1601 .remap_file_range = cifs_remap_file_range,
1602 .setlease = cifs_setlease,
1603 .fallocate = cifs_fallocate,
/* Direct-I/O variant without byte-range locking ("directio,nobrl"). */
1606 const struct file_operations cifs_file_direct_nobrl_ops = {
1607 .read_iter = cifs_direct_readv,
1608 .write_iter = cifs_direct_writev,
1610 .release = cifs_close,
1611 .fsync = cifs_fsync,
1612 .flush = cifs_flush,
1613 .mmap = cifs_file_mmap,
1614 .splice_read = generic_file_splice_read,
1615 .splice_write = iter_file_splice_write,
1616 .unlocked_ioctl = cifs_ioctl,
1617 .copy_file_range = cifs_copy_file_range,
1618 .remap_file_range = cifs_remap_file_range,
1619 .llseek = cifs_llseek,
1620 .setlease = cifs_setlease,
1621 .fallocate = cifs_fallocate,
/* File operations for directories (readdir, ioctl, no-op fsync). */
1624 const struct file_operations cifs_dir_ops = {
1625 .iterate_shared = cifs_readdir,
1626 .release = cifs_closedir,
1627 .read = generic_read_dir,
1628 .unlocked_ioctl = cifs_ioctl,
1629 .copy_file_range = cifs_copy_file_range,
1630 .remap_file_range = cifs_remap_file_range,
1631 .llseek = generic_file_llseek,
1632 .fsync = cifs_dir_fsync,
/* Slab constructor: one-time init of each cifsInodeInfo object. */
1636 cifs_init_once(void *inode)
1638 struct cifsInodeInfo *cifsi = inode;
1640 inode_init_once(&cifsi->netfs.inode);
1641 init_rwsem(&cifsi->lock_sem);
/* Create the cifsInodeInfo slab cache; returns 0 or -ENOMEM. */
1645 cifs_init_inodecache(void)
1647 cifs_inode_cachep = kmem_cache_create("cifs_inode_cache",
1648 sizeof(struct cifsInodeInfo),
1649 0, (SLAB_RECLAIM_ACCOUNT|
1650 SLAB_MEM_SPREAD|SLAB_ACCOUNT),
1652 if (cifs_inode_cachep == NULL)
/* Tear down the inode slab cache at module unload / init failure. */
1659 cifs_destroy_inodecache(void)
1662 * Make sure all delayed rcu free inodes are flushed before we
1666 kmem_cache_destroy(cifs_inode_cachep);
/*
 * Allocate the large and small request-buffer slab caches and their
 * mempools, clamping the tunables (CIFSMaxBufSize, cifs_min_rcv,
 * cifs_min_small) into their supported ranges first.  Returns 0 or
 * -ENOMEM, undoing partial allocations on failure.
 */
1670 cifs_init_request_bufs(void)
1673 * SMB2 maximum header size is bigger than CIFS one - no problems to
1674 * allocate some more bytes for CIFS.
1676 size_t max_hdr_size = MAX_SMB2_HDR_SIZE;
/* Clamp the module-parameter buffer size to [8192, 127K]. */
1678 if (CIFSMaxBufSize < 8192) {
1679 /* Buffer size can not be smaller than 2 * PATH_MAX since maximum
1680 Unicode path name has to fit in any SMB/CIFS path based frames */
1681 CIFSMaxBufSize = 8192;
1682 } else if (CIFSMaxBufSize > 1024*127) {
1683 CIFSMaxBufSize = 1024 * 127;
1685 CIFSMaxBufSize &= 0x1FE00; /* Round size to even 512 byte mult*/
1688 cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n",
1689 CIFSMaxBufSize, CIFSMaxBufSize);
/* usercopy whitelist covers the whole buffer (read/write payloads). */
1691 cifs_req_cachep = kmem_cache_create_usercopy("cifs_request",
1692 CIFSMaxBufSize + max_hdr_size, 0,
1693 SLAB_HWCACHE_ALIGN, 0,
1694 CIFSMaxBufSize + max_hdr_size,
1696 if (cifs_req_cachep == NULL)
/* Clamp the preallocated receive-buffer count to [1, 64]. */
1699 if (cifs_min_rcv < 1)
1701 else if (cifs_min_rcv > 64) {
1703 cifs_dbg(VFS, "cifs_min_rcv set to maximum (64)\n");
1706 cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv,
1709 if (cifs_req_poolp == NULL) {
1710 kmem_cache_destroy(cifs_req_cachep);
1713 /* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
1714 almost all handle based requests (but not write response, nor is it
1715 sufficient for path based requests). A smaller size would have
1716 been more efficient (compacting multiple slab items on one 4k page)
1717 for the case in which debug was on, but this larger size allows
1718 more SMBs to use small buffer alloc and is still much more
1719 efficient to alloc 1 per page off the slab compared to 17K (5page)
1720 alloc of large cifs buffers even when page debugging is on */
1721 cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq",
1722 MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
1723 0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL);
1724 if (cifs_sm_req_cachep == NULL) {
1725 mempool_destroy(cifs_req_poolp);
1726 kmem_cache_destroy(cifs_req_cachep);
/* Clamp the small-buffer pool size to [2, 256]. */
1730 if (cifs_min_small < 2)
1732 else if (cifs_min_small > 256) {
1733 cifs_min_small = 256;
1734 cifs_dbg(FYI, "cifs_min_small set to maximum (256)\n");
1737 cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small,
1738 cifs_sm_req_cachep);
/* Unwind everything allocated so far on the last failure point. */
1740 if (cifs_sm_req_poolp == NULL) {
1741 mempool_destroy(cifs_req_poolp);
1742 kmem_cache_destroy(cifs_req_cachep);
1743 kmem_cache_destroy(cifs_sm_req_cachep);
/* Release both request-buffer mempools and their slab caches. */
1753 cifs_destroy_request_bufs(void)
1753 mempool_destroy(cifs_req_poolp);
1754 kmem_cache_destroy(cifs_req_cachep);
1755 mempool_destroy(cifs_sm_req_poolp);
1756 kmem_cache_destroy(cifs_sm_req_cachep);
/* Create the mid (multiplex-id) slab cache and its mempool; 0 or -ENOMEM. */
1759 static int init_mids(void)
1761 cifs_mid_cachep = kmem_cache_create("cifs_mpx_ids",
1762 sizeof(struct mid_q_entry), 0,
1763 SLAB_HWCACHE_ALIGN, NULL);
1764 if (cifs_mid_cachep == NULL)
1767 /* 3 is a reasonable minimum number of simultaneous operations */
1768 cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep);
/* Undo the cache creation if the pool cannot be built on top of it. */
1769 if (cifs_mid_poolp == NULL) {
1770 kmem_cache_destroy(cifs_mid_cachep);
/* Counterpart of init_mids(): pool first, then its backing cache. */
1777 static void destroy_mids(void)
1779 mempool_destroy(cifs_mid_poolp);
1780 kmem_cache_destroy(cifs_mid_cachep);
/* Module init: reset global counters/locks, clamp tunables, create the
 * worker workqueues and caches, register upcalls and both filesystem
 * types.  Failures unwind in reverse via the goto ladder at the bottom. */
1788 INIT_LIST_HEAD(&cifs_tcp_ses_list);
1790 * Initialize Global counters
1792 atomic_set(&sesInfoAllocCount, 0);
1793 atomic_set(&tconInfoAllocCount, 0);
1794 atomic_set(&tcpSesNextId, 0);
1795 atomic_set(&tcpSesAllocCount, 0);
1796 atomic_set(&tcpSesReconnectCount, 0);
1797 atomic_set(&tconInfoReconnectCount, 0);
1799 atomic_set(&buf_alloc_count, 0);
1800 atomic_set(&small_buf_alloc_count, 0);
1801 #ifdef CONFIG_CIFS_STATS2
1802 atomic_set(&total_buf_alloc_count, 0);
1803 atomic_set(&total_small_buf_alloc_count, 0);
1804 if (slow_rsp_threshold < 1)
1805 cifs_dbg(FYI, "slow_response_threshold msgs disabled\n");
1806 else if (slow_rsp_threshold > 32767)
1808 "slow response threshold set higher than recommended (0 to 32767)\n");
1809 #endif /* CONFIG_CIFS_STATS2 */
1811 atomic_set(&mid_count, 0);
1812 GlobalCurrentXid = 0;
1813 GlobalTotalActiveXid = 0;
1814 GlobalMaxActiveXid = 0;
1815 spin_lock_init(&cifs_tcp_ses_lock);
1816 spin_lock_init(&GlobalMid_Lock);
/* Random secret used to hash lock owners (avoids info leak of pointers). */
1818 cifs_lock_secret = get_random_u32();
/* Clamp in-flight request limit to [2, CIFS_MAX_REQ]. */
1820 if (cifs_max_pending < 2) {
1821 cifs_max_pending = 2;
1822 cifs_dbg(FYI, "cifs_max_pending set to min of 2\n");
1823 } else if (cifs_max_pending > CIFS_MAX_REQ) {
1824 cifs_max_pending = CIFS_MAX_REQ;
1825 cifs_dbg(FYI, "cifs_max_pending set to max of %u\n",
1829 cifsiod_wq = alloc_workqueue("cifsiod", WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1832 goto out_clean_proc;
1836 * Consider in future setting limit!=0 maybe to min(num_of_cores - 1, 3)
1837 * so that we don't launch too many worker threads but
1838 * Documentation/core-api/workqueue.rst recommends setting it to 0
1841 /* WQ_UNBOUND allows decrypt tasks to run on any CPU */
1842 decrypt_wq = alloc_workqueue("smb3decryptd",
1843 WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1846 goto out_destroy_cifsiod_wq;
1849 fileinfo_put_wq = alloc_workqueue("cifsfileinfoput",
1850 WQ_UNBOUND|WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1851 if (!fileinfo_put_wq) {
1853 goto out_destroy_decrypt_wq;
1856 cifsoplockd_wq = alloc_workqueue("cifsoplockd",
1857 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1858 if (!cifsoplockd_wq) {
1860 goto out_destroy_fileinfo_put_wq;
1863 deferredclose_wq = alloc_workqueue("deferredclose",
1864 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1865 if (!deferredclose_wq) {
1867 goto out_destroy_cifsoplockd_wq;
1870 serverclose_wq = alloc_workqueue("serverclose",
1871 WQ_FREEZABLE|WQ_MEM_RECLAIM, 0);
1872 if (!serverclose_wq) {
/* NOTE(review): BUG — serverclose_wq failed to allocate, yet this jumps
 * to out_destroy_serverclose_wq, which destroy_workqueue()s the very
 * (NULL) workqueue that failed and skips tearing down the five earlier
 * workqueues (deferredclose, cifsoplockd, fileinfo_put, decrypt,
 * cifsiod).  Should be "goto out_destroy_deferredclose_wq". Confirm
 * against upstream before changing. */
1874 goto out_destroy_serverclose_wq;
1877 rc = cifs_init_inodecache();
1879 goto out_destroy_deferredclose_wq;
1883 goto out_destroy_inodecache;
1885 rc = cifs_init_request_bufs();
1887 goto out_destroy_mids;
1889 #ifdef CONFIG_CIFS_DFS_UPCALL
1890 rc = dfs_cache_init();
1892 goto out_destroy_request_bufs;
1893 #endif /* CONFIG_CIFS_DFS_UPCALL */
1894 #ifdef CONFIG_CIFS_UPCALL
1895 rc = init_cifs_spnego();
1897 goto out_destroy_dfs_cache;
1898 #endif /* CONFIG_CIFS_UPCALL */
1899 #ifdef CONFIG_CIFS_SWN_UPCALL
1900 rc = cifs_genl_init();
1902 goto out_register_key_type;
1903 #endif /* CONFIG_CIFS_SWN_UPCALL */
1905 rc = init_cifs_idmap();
1907 goto out_cifs_swn_init;
1909 rc = register_filesystem(&cifs_fs_type);
1911 goto out_init_cifs_idmap;
/* smb3 fs type registers second; roll back cifs_fs_type if it fails. */
1913 rc = register_filesystem(&smb3_fs_type);
1915 unregister_filesystem(&cifs_fs_type);
1916 goto out_init_cifs_idmap;
/* Error-unwind ladder: each label undoes the step above it, falling
 * through to undo all earlier steps. */
1921 out_init_cifs_idmap:
1924 #ifdef CONFIG_CIFS_SWN_UPCALL
1926 out_register_key_type:
1928 #ifdef CONFIG_CIFS_UPCALL
1930 out_destroy_dfs_cache:
1932 #ifdef CONFIG_CIFS_DFS_UPCALL
1933 dfs_cache_destroy();
1934 out_destroy_request_bufs:
1936 cifs_destroy_request_bufs();
1939 out_destroy_inodecache:
1940 cifs_destroy_inodecache();
1941 out_destroy_deferredclose_wq:
1942 destroy_workqueue(deferredclose_wq);
1943 out_destroy_cifsoplockd_wq:
1944 destroy_workqueue(cifsoplockd_wq);
1945 out_destroy_fileinfo_put_wq:
1946 destroy_workqueue(fileinfo_put_wq);
1947 out_destroy_decrypt_wq:
1948 destroy_workqueue(decrypt_wq);
1949 out_destroy_cifsiod_wq:
1950 destroy_workqueue(cifsiod_wq);
/* NOTE(review): this label sits BELOW out_destroy_cifsiod_wq, so any path
 * entering the ladder at or above out_destroy_cifsiod_wq (e.g. decrypt_wq
 * allocation failure) falls through to destroy_workqueue(serverclose_wq)
 * while serverclose_wq is still NULL — destroy_workqueue() does not
 * tolerate NULL.  The serverclose_wq teardown should sit above
 * out_destroy_deferredclose_wq to mirror allocation order. */
1951 out_destroy_serverclose_wq:
1952 destroy_workqueue(serverclose_wq);
/* Module exit: unregister both fs types and upcalls, then free caches,
 * buffers and workqueues (roughly the reverse of init_cifs()). */
1961 cifs_dbg(NOISY, "exit_smb3\n");
1962 unregister_filesystem(&cifs_fs_type);
1963 unregister_filesystem(&smb3_fs_type);
1964 cifs_dfs_release_automount_timer();
1966 #ifdef CONFIG_CIFS_SWN_UPCALL
1969 #ifdef CONFIG_CIFS_UPCALL
1972 #ifdef CONFIG_CIFS_DFS_UPCALL
1973 dfs_cache_destroy();
1975 cifs_destroy_request_bufs();
1977 cifs_destroy_inodecache();
/* All workqueues were successfully allocated if we loaded, so plain
 * destroy calls are safe here. */
1978 destroy_workqueue(deferredclose_wq);
1979 destroy_workqueue(cifsoplockd_wq);
1980 destroy_workqueue(decrypt_wq);
1981 destroy_workqueue(fileinfo_put_wq);
1982 destroy_workqueue(serverclose_wq);
1983 destroy_workqueue(cifsiod_wq);
/* Module metadata and soft dependencies on the crypto/nls modules the
 * client may need at mount time (signing, encryption, name conversion). */
1987 MODULE_AUTHOR("Steve French");
1988 MODULE_LICENSE("GPL"); /* combination of LGPL + GPL source behaves as GPL */
1990 ("VFS to access SMB3 servers e.g. Samba, Macs, Azure and Windows (and "
1991 "also older servers complying with the SNIA CIFS Specification)");
1992 MODULE_VERSION(CIFS_VERSION);
1993 MODULE_SOFTDEP("ecb");
1994 MODULE_SOFTDEP("hmac");
1995 MODULE_SOFTDEP("md5");
1996 MODULE_SOFTDEP("nls");
1997 MODULE_SOFTDEP("aes");
1998 MODULE_SOFTDEP("cmac");
1999 MODULE_SOFTDEP("sha256");
2000 MODULE_SOFTDEP("sha512");
2001 MODULE_SOFTDEP("aead2");
2002 MODULE_SOFTDEP("ccm");
2003 MODULE_SOFTDEP("gcm");
2004 module_init(init_cifs)
2005 module_exit(exit_cifs)