1 // SPDX-License-Identifier: LGPL-2.1
4 * Copyright (C) International Business Machines Corp., 2002,2008
5 * Author(s): Steve French (sfrench@us.ibm.com)
9 #include <linux/slab.h>
10 #include <linux/ctype.h>
11 #include <linux/mempool.h>
12 #include <linux/vmalloc.h>
15 #include "cifsproto.h"
16 #include "cifs_debug.h"
19 #include "cifs_unicode.h"
22 #ifdef CONFIG_CIFS_DFS_UPCALL
23 #include "dns_resolve.h"
24 #include "dfs_cache.h"
26 #include "fs_context.h"
27 #include "cached_dir.h"
29 extern mempool_t *cifs_sm_req_poolp;
30 extern mempool_t *cifs_req_poolp;
32 /* The xid serves as a useful identifier for each incoming vfs request,
33 in a similar way to the mid which is useful to track each sent smb,
34 and CurrentXid can also provide a running counter (although it
35 will eventually wrap past zero) of the total vfs operations handled
36 since the cifs fs was mounted */
43 spin_lock(&GlobalMid_Lock);
44 GlobalTotalActiveXid++;
46 /* keep high water mark for number of simultaneous ops in filesystem */
47 if (GlobalTotalActiveXid > GlobalMaxActiveXid)
48 GlobalMaxActiveXid = GlobalTotalActiveXid;
49 if (GlobalTotalActiveXid > 65000)
50 cifs_dbg(FYI, "warning: more than 65000 requests active\n");
51 xid = GlobalCurrentXid++;
52 spin_unlock(&GlobalMid_Lock);
57 _free_xid(unsigned int xid)
59 spin_lock(&GlobalMid_Lock);
60 /* if (GlobalTotalActiveXid == 0) BUG(); */
62 GlobalTotalActiveXid--;
63 spin_unlock(&GlobalMid_Lock);
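/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * source): callers typically bracket a VFS operation with a matching
 * get_xid()/free_xid() pair, e.g.
 *
 *	unsigned int xid = get_xid();
 *	rc = smb_operation(xid, tcon, ...);
 *	free_xid(xid);
 *
 * where smb_operation() is only a placeholder for any CIFS/SMB call that
 * takes an xid, not a real function.
 */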
69 struct cifs_ses *ret_buf;
71 ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL);
73 atomic_inc(&sesInfoAllocCount);
74 spin_lock_init(&ret_buf->ses_lock);
75 ret_buf->ses_status = SES_NEW;
77 INIT_LIST_HEAD(&ret_buf->smb_ses_list);
78 INIT_LIST_HEAD(&ret_buf->tcon_list);
79 mutex_init(&ret_buf->session_mutex);
80 spin_lock_init(&ret_buf->iface_lock);
81 INIT_LIST_HEAD(&ret_buf->iface_list);
82 spin_lock_init(&ret_buf->chan_lock);
88 sesInfoFree(struct cifs_ses *buf_to_free)
90 struct cifs_server_iface *iface = NULL, *niface = NULL;
92 if (buf_to_free == NULL) {
93 cifs_dbg(FYI, "Null buffer passed to sesInfoFree\n");
97 atomic_dec(&sesInfoAllocCount);
98 kfree(buf_to_free->serverOS);
99 kfree(buf_to_free->serverDomain);
100 kfree(buf_to_free->serverNOS);
101 kfree_sensitive(buf_to_free->password);
102 kfree(buf_to_free->user_name);
103 kfree(buf_to_free->domainName);
104 kfree_sensitive(buf_to_free->auth_key.response);
105 spin_lock(&buf_to_free->iface_lock);
106 list_for_each_entry_safe(iface, niface, &buf_to_free->iface_list,
108 kref_put(&iface->refcount, release_iface);
109 spin_unlock(&buf_to_free->iface_lock);
110 kfree_sensitive(buf_to_free);
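/*
 * Editorial note: the secret-bearing fields (password, auth_key.response)
 * and the session structure itself are released with kfree_sensitive(),
 * which zeroes the memory before freeing it, while the non-sensitive
 * strings use plain kfree().
 */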
116 struct cifs_tcon *ret_buf;
118 ret_buf = kzalloc(sizeof(*ret_buf), GFP_KERNEL);
121 ret_buf->cfids = init_cached_dirs();
122 if (!ret_buf->cfids) {
127 atomic_inc(&tconInfoAllocCount);
128 ret_buf->status = TID_NEW;
130 spin_lock_init(&ret_buf->tc_lock);
131 INIT_LIST_HEAD(&ret_buf->openFileList);
132 INIT_LIST_HEAD(&ret_buf->tcon_list);
133 spin_lock_init(&ret_buf->open_file_lock);
134 spin_lock_init(&ret_buf->stat_lock);
135 atomic_set(&ret_buf->num_local_opens, 0);
136 atomic_set(&ret_buf->num_remote_opens, 0);
142 tconInfoFree(struct cifs_tcon *tcon)
145 cifs_dbg(FYI, "Null buffer passed to tconInfoFree\n");
148 free_cached_dirs(tcon->cfids);
149 atomic_dec(&tconInfoAllocCount);
150 kfree(tcon->nativeFileSystem);
151 kfree_sensitive(tcon->password);
158 struct smb_hdr *ret_buf = NULL;
160 * SMB2 header is bigger than the CIFS one - no problem clearing a few
161 * extra bytes for CIFS.
163 size_t buf_size = sizeof(struct smb2_hdr);
166 * We could use the negotiated size instead of max_msgsize -
167 * but it may be more efficient to always allocate the same size,
168 * albeit slightly larger than necessary; maxbuffersize
169 * defaults to this and cannot be bigger.
171 ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS);
173 /* clear the first few header bytes */
174 /* for most paths, more is cleared in header_assemble */
175 memset(ret_buf, 0, buf_size + 3);
176 atomic_inc(&buf_alloc_count);
177 #ifdef CONFIG_CIFS_STATS2
178 atomic_inc(&total_buf_alloc_count);
179 #endif /* CONFIG_CIFS_STATS2 */
185 cifs_buf_release(void *buf_to_free)
187 if (buf_to_free == NULL) {
188 /* cifs_dbg(FYI, "Null buffer passed to cifs_buf_release\n");*/
191 mempool_free(buf_to_free, cifs_req_poolp);
193 atomic_dec(&buf_alloc_count);
198 cifs_small_buf_get(void)
200 struct smb_hdr *ret_buf = NULL;
202 /* We could use the negotiated size instead of max_msgsize -
203 but it may be more efficient to always allocate the same size,
204 albeit slightly larger than necessary; maxbuffersize
205 defaults to this and cannot be bigger */
206 ret_buf = mempool_alloc(cifs_sm_req_poolp, GFP_NOFS);
207 /* No need to clear memory here, cleared in header assemble */
208 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
209 atomic_inc(&small_buf_alloc_count);
210 #ifdef CONFIG_CIFS_STATS2
211 atomic_inc(&total_small_buf_alloc_count);
212 #endif /* CONFIG_CIFS_STATS2 */
218 cifs_small_buf_release(void *buf_to_free)
221 if (buf_to_free == NULL) {
222 cifs_dbg(FYI, "Null buffer passed to cifs_small_buf_release\n");
225 mempool_free(buf_to_free, cifs_sm_req_poolp);
227 atomic_dec(&small_buf_alloc_count);
232 free_rsp_buf(int resp_buftype, void *rsp)
234 if (resp_buftype == CIFS_SMALL_BUFFER)
235 cifs_small_buf_release(rsp);
236 else if (resp_buftype == CIFS_LARGE_BUFFER)
237 cifs_buf_release(rsp);
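/*
 * Illustrative pairing sketch (editorial addition): a response buffer taken
 * from one of the mempools is returned through the matching release helper,
 * or through free_rsp_buf() when only the buffer type is known, e.g.
 *
 *	void *buf = cifs_small_buf_get();
 *	...
 *	free_rsp_buf(CIFS_SMALL_BUFFER, buf);
 */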
240 /* NB: MID can not be set if treeCon not passed in, in that
241 case it is the responsibility of the caller to set the mid */
243 header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
244 const struct cifs_tcon *treeCon, int word_count
245 /* length of fixed section (word count) in two byte units */)
247 char *temp = (char *) buffer;
249 memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */
251 buffer->smb_buf_length = cpu_to_be32(
252 (2 * word_count) + sizeof(struct smb_hdr) -
253 4 /* RFC 1001 length field does not count */ +
254 2 /* for bcc field itself */) ;
256 buffer->Protocol[0] = 0xFF;
257 buffer->Protocol[1] = 'S';
258 buffer->Protocol[2] = 'M';
259 buffer->Protocol[3] = 'B';
260 buffer->Command = smb_command;
261 buffer->Flags = 0x00; /* case sensitive */
262 buffer->Flags2 = SMBFLG2_KNOWS_LONG_NAMES;
263 buffer->Pid = cpu_to_le16((__u16)current->tgid);
264 buffer->PidHigh = cpu_to_le16((__u16)(current->tgid >> 16));
266 buffer->Tid = treeCon->tid;
268 if (treeCon->ses->capabilities & CAP_UNICODE)
269 buffer->Flags2 |= SMBFLG2_UNICODE;
270 if (treeCon->ses->capabilities & CAP_STATUS32)
271 buffer->Flags2 |= SMBFLG2_ERR_STATUS;
273 /* Uid is not converted */
274 buffer->Uid = treeCon->ses->Suid;
275 if (treeCon->ses->server)
276 buffer->Mid = get_next_mid(treeCon->ses->server);
278 if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
279 buffer->Flags2 |= SMBFLG2_DFS;
281 buffer->Flags |= SMBFLG_CASELESS;
282 if ((treeCon->ses) && (treeCon->ses->server))
283 if (treeCon->ses->server->sign)
284 buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
287 /* endian conversion of flags is now done just before sending */
288 buffer->WordCount = (char) word_count;
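/*
 * Editorial note on the length math above: smb_buf_length counts everything
 * after the 4-byte RFC 1001 prefix, so for word_count == 0 it works out to
 * the 32-byte SMB header plus the 1-byte WordCount plus the 2-byte ByteCount
 * (bcc) field; each additional word_count unit adds two bytes.
 */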
293 check_smb_hdr(struct smb_hdr *smb)
295 /* does it have the right SMB "signature" ? */
296 if (*(__le32 *) smb->Protocol != cpu_to_le32(0x424d53ff)) {
297 cifs_dbg(VFS, "Bad protocol string signature header 0x%x\n",
298 *(unsigned int *)smb->Protocol);
302 /* if it's a response then accept */
303 if (smb->Flags & SMBFLG_RESPONSE)
306 /* only one valid case where server sends us request */
307 if (smb->Command == SMB_COM_LOCKING_ANDX)
310 cifs_dbg(VFS, "Server sent request, not response. mid=%u\n",
316 checkSMB(char *buf, unsigned int total_read, struct TCP_Server_Info *server)
318 struct smb_hdr *smb = (struct smb_hdr *)buf;
319 __u32 rfclen = be32_to_cpu(smb->smb_buf_length);
320 __u32 clc_len; /* calculated length */
321 cifs_dbg(FYI, "checkSMB Length: 0x%x, smb_buf_length: 0x%x\n",
324 /* is this frame too small to even get to a BCC? */
325 if (total_read < 2 + sizeof(struct smb_hdr)) {
326 if ((total_read >= sizeof(struct smb_hdr) - 1)
327 && (smb->Status.CifsError != 0)) {
328 /* it's an error return */
330 /* some error cases do not return wct and bcc */
332 } else if ((total_read == sizeof(struct smb_hdr) + 1) &&
333 (smb->WordCount == 0)) {
334 char *tmp = (char *)smb;
335 /* Need to work around a bug in two servers here */
336 /* First, check if the part of bcc they sent was zero */
337 if (tmp[sizeof(struct smb_hdr)] == 0) {
338 /* some servers return only half of bcc
339 * on simple responses (wct, bcc both zero)
340 * in particular this has been seen on
341 * ulogoffX and FindClose. This leaves
342 * one byte of bcc potentially uninitialized
344 /* zero rest of bcc */
345 tmp[sizeof(struct smb_hdr)+1] = 0;
348 cifs_dbg(VFS, "rcvd invalid byte count (bcc)\n");
350 cifs_dbg(VFS, "Length less than smb header size\n");
353 } else if (total_read < sizeof(*smb) + 2 * smb->WordCount) {
354 cifs_dbg(VFS, "%s: can't read BCC due to invalid WordCount(%u)\n",
355 __func__, smb->WordCount);
359 /* otherwise, there is enough to get to the BCC */
360 if (check_smb_hdr(smb))
362 clc_len = smbCalcSize(smb);
364 if (4 + rfclen != total_read) {
365 cifs_dbg(VFS, "Length read does not match RFC1001 length %d\n",
370 if (4 + rfclen != clc_len) {
371 __u16 mid = get_mid(smb);
372 /* check if bcc wrapped around for large read responses */
373 if ((rfclen > 64 * 1024) && (rfclen > clc_len)) {
374 /* check if lengths match mod 64K */
375 if (((4 + rfclen) & 0xFFFF) == (clc_len & 0xFFFF))
376 return 0; /* bcc wrapped */
378 cifs_dbg(FYI, "Calculated size %u vs length %u mismatch for mid=%u\n",
379 clc_len, 4 + rfclen, mid);
381 if (4 + rfclen < clc_len) {
382 cifs_dbg(VFS, "RFC1001 size %u smaller than SMB for mid=%u\n",
385 } else if (rfclen > clc_len + 512) {
387 * Some servers (Windows XP in particular) send more
388 * data than the lengths in the SMB packet would
389 * indicate on certain calls (byte range locks and
390 * trans2 find first calls in particular). While the
391 * client can handle such a frame by ignoring the
392 * trailing data, we choose to limit the amount of extra
395 cifs_dbg(VFS, "RFC1001 size %u more than 512 bytes larger than SMB for mid=%u\n",
404 is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
406 struct smb_hdr *buf = (struct smb_hdr *)buffer;
407 struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf;
408 struct TCP_Server_Info *pserver;
409 struct cifs_ses *ses;
410 struct cifs_tcon *tcon;
411 struct cifsInodeInfo *pCifsInode;
412 struct cifsFileInfo *netfile;
414 cifs_dbg(FYI, "Checking for oplock break or dnotify response\n");
415 if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
416 (pSMB->hdr.Flags & SMBFLG_RESPONSE)) {
417 struct smb_com_transaction_change_notify_rsp *pSMBr =
418 (struct smb_com_transaction_change_notify_rsp *)buf;
419 struct file_notify_information *pnotify;
420 __u32 data_offset = 0;
421 size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length);
423 if (get_bcc(buf) > sizeof(struct file_notify_information)) {
424 data_offset = le32_to_cpu(pSMBr->DataOffset);
427 len - sizeof(struct file_notify_information)) {
428 cifs_dbg(FYI, "Invalid data_offset %u\n",
432 pnotify = (struct file_notify_information *)
433 ((char *)&pSMBr->hdr.Protocol + data_offset);
434 cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
435 pnotify->FileName, pnotify->Action);
436 /* cifs_dump_mem("Rcvd notify Data: ",buf,
437 sizeof(struct smb_hdr)+60); */
440 if (pSMBr->hdr.Status.CifsError) {
441 cifs_dbg(FYI, "notify err 0x%x\n",
442 pSMBr->hdr.Status.CifsError);
447 if (pSMB->hdr.Command != SMB_COM_LOCKING_ANDX)
449 if (pSMB->hdr.Flags & SMBFLG_RESPONSE) {
450 /* no point logging an error for an invalid handle on an oplock
451 break - a harmless race between a close request and the oplock
452 break response is expected from time to time when writing out
453 large dirty files cached on the client */
454 if ((NT_STATUS_INVALID_HANDLE) ==
455 le32_to_cpu(pSMB->hdr.Status.CifsError)) {
456 cifs_dbg(FYI, "Invalid handle on oplock break\n");
458 } else if (ERRbadfid ==
459 le16_to_cpu(pSMB->hdr.Status.DosError.Error)) {
462 return false; /* on valid oplock brk we get "request" */
465 if (pSMB->hdr.WordCount != 8)
468 cifs_dbg(FYI, "oplock type 0x%x level 0x%x\n",
469 pSMB->LockType, pSMB->OplockLevel);
470 if (!(pSMB->LockType & LOCKING_ANDX_OPLOCK_RELEASE))
473 /* If server is a channel, select the primary channel */
474 pserver = CIFS_SERVER_IS_CHAN(srv) ? srv->primary_server : srv;
476 /* look up tcon based on tid & uid */
477 spin_lock(&cifs_tcp_ses_lock);
478 list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
479 if (cifs_ses_exiting(ses))
481 list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
482 if (tcon->tid != buf->Tid)
485 cifs_stats_inc(&tcon->stats.cifs_stats.num_oplock_brks);
486 spin_lock(&tcon->open_file_lock);
487 list_for_each_entry(netfile, &tcon->openFileList, tlist) {
488 if (pSMB->Fid != netfile->fid.netfid)
491 cifs_dbg(FYI, "file id match, oplock break\n");
492 pCifsInode = CIFS_I(d_inode(netfile->dentry));
494 set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
497 netfile->oplock_epoch = 0;
498 netfile->oplock_level = pSMB->OplockLevel;
499 netfile->oplock_break_cancelled = false;
500 cifs_queue_oplock_break(netfile);
502 spin_unlock(&tcon->open_file_lock);
503 spin_unlock(&cifs_tcp_ses_lock);
506 spin_unlock(&tcon->open_file_lock);
507 spin_unlock(&cifs_tcp_ses_lock);
508 cifs_dbg(FYI, "No matching file for oplock break\n");
512 spin_unlock(&cifs_tcp_ses_lock);
513 cifs_dbg(FYI, "Can not process oplock break for non-existent connection\n");
518 dump_smb(void *buf, int smb_buf_length)
523 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, 8, 2, buf,
524 smb_buf_length, true);
528 cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb)
530 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
531 struct cifs_tcon *tcon = NULL;
533 if (cifs_sb->master_tlink)
534 tcon = cifs_sb_master_tcon(cifs_sb);
536 cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_SERVER_INUM;
537 cifs_sb->mnt_cifs_serverino_autodisabled = true;
538 cifs_dbg(VFS, "Autodisabling the use of server inode numbers on %s\n",
539 tcon ? tcon->tree_name : "new server");
540 cifs_dbg(VFS, "The server doesn't seem to support them properly or the files might be on different servers (DFS)\n");
541 cifs_dbg(VFS, "Hardlinks will not be recognized on this mount. Consider mounting with the \"noserverino\" option to silence this message.\n");
546 void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
550 if (oplock == OPLOCK_EXCLUSIVE) {
551 cinode->oplock = CIFS_CACHE_WRITE_FLG | CIFS_CACHE_READ_FLG;
552 cifs_dbg(FYI, "Exclusive Oplock granted on inode %p\n",
553 &cinode->netfs.inode);
554 } else if (oplock == OPLOCK_READ) {
555 cinode->oplock = CIFS_CACHE_READ_FLG;
556 cifs_dbg(FYI, "Level II Oplock granted on inode %p\n",
557 &cinode->netfs.inode);
563 * We wait for oplock breaks to be processed before we attempt to perform
566 int cifs_get_writer(struct cifsInodeInfo *cinode)
571 rc = wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK,
576 spin_lock(&cinode->writers_lock);
577 if (!cinode->writers)
578 set_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
580 /* Check to see if we have started servicing an oplock break */
581 if (test_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags)) {
583 if (cinode->writers == 0) {
584 clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
585 wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
587 spin_unlock(&cinode->writers_lock);
590 spin_unlock(&cinode->writers_lock);
594 void cifs_put_writer(struct cifsInodeInfo *cinode)
596 spin_lock(&cinode->writers_lock);
598 if (cinode->writers == 0) {
599 clear_bit(CIFS_INODE_PENDING_WRITERS, &cinode->flags);
600 wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS);
602 spin_unlock(&cinode->writers_lock);
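/*
 * Illustrative usage sketch (editorial addition): a writer is expected to
 * bracket its work so that oplock break processing can wait for it, e.g.
 *
 *	rc = cifs_get_writer(cinode);
 *	if (rc)
 *		return rc;
 *	... perform the cached write ...
 *	cifs_put_writer(cinode);
 */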
606 * cifs_queue_oplock_break - queue the oplock break handler for cfile
607 * @cfile: The file to break the oplock on
609 * This function is called from the demultiplex thread when it
610 * receives an oplock break for @cfile.
612 * Assumes the tcon->open_file_lock is held.
613 * Assumes cfile->file_info_lock is NOT held.
615 void cifs_queue_oplock_break(struct cifsFileInfo *cfile)
618 * Bump the handle refcount now while we hold the
619 * open_file_lock to enforce the validity of it for the oplock
620 * break handler. The matching put is done at the end of the
623 cifsFileInfo_get(cfile);
625 queue_work(cifsoplockd_wq, &cfile->oplock_break);
628 void cifs_done_oplock_break(struct cifsInodeInfo *cinode)
630 clear_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
631 wake_up_bit(&cinode->flags, CIFS_INODE_PENDING_OPLOCK_BREAK);
635 backup_cred(struct cifs_sb_info *cifs_sb)
637 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) {
638 if (uid_eq(cifs_sb->ctx->backupuid, current_fsuid()))
641 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) {
642 if (in_group_p(cifs_sb->ctx->backupgid))
650 cifs_del_pending_open(struct cifs_pending_open *open)
652 spin_lock(&tlink_tcon(open->tlink)->open_file_lock);
653 list_del(&open->olist);
654 spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
658 cifs_add_pending_open_locked(struct cifs_fid *fid, struct tcon_link *tlink,
659 struct cifs_pending_open *open)
661 memcpy(open->lease_key, fid->lease_key, SMB2_LEASE_KEY_SIZE);
662 open->oplock = CIFS_OPLOCK_NO_CHANGE;
664 fid->pending_open = open;
665 list_add_tail(&open->olist, &tlink_tcon(tlink)->pending_opens);
669 cifs_add_pending_open(struct cifs_fid *fid, struct tcon_link *tlink,
670 struct cifs_pending_open *open)
672 spin_lock(&tlink_tcon(tlink)->open_file_lock);
673 cifs_add_pending_open_locked(fid, tlink, open);
674 spin_unlock(&tlink_tcon(open->tlink)->open_file_lock);
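/*
 * Illustrative usage sketch (editorial addition, assuming fid and tlink come
 * from the caller's open path): a pending open is registered before the open
 * goes on the wire and removed afterwards, e.g.
 *
 *	struct cifs_pending_open open;
 *
 *	cifs_add_pending_open(&fid, tlink, &open);
 *	rc = do_the_open(...);              hypothetical call site
 *	cifs_del_pending_open(&open);
 */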
678 * Critical section which runs after acquiring deferred_lock.
679 * As there is no reference count on cifs_deferred_close, pdclose
680 * should not be used outside deferred_lock.
683 cifs_is_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close **pdclose)
685 struct cifs_deferred_close *dclose;
687 list_for_each_entry(dclose, &CIFS_I(d_inode(cfile->dentry))->deferred_closes, dlist) {
688 if ((dclose->netfid == cfile->fid.netfid) &&
689 (dclose->persistent_fid == cfile->fid.persistent_fid) &&
690 (dclose->volatile_fid == cfile->fid.volatile_fid)) {
699 * Critical section which runs after acquiring deferred_lock.
702 cifs_add_deferred_close(struct cifsFileInfo *cfile, struct cifs_deferred_close *dclose)
704 bool is_deferred = false;
705 struct cifs_deferred_close *pdclose;
707 is_deferred = cifs_is_deferred_close(cfile, &pdclose);
713 dclose->tlink = cfile->tlink;
714 dclose->netfid = cfile->fid.netfid;
715 dclose->persistent_fid = cfile->fid.persistent_fid;
716 dclose->volatile_fid = cfile->fid.volatile_fid;
717 list_add_tail(&dclose->dlist, &CIFS_I(d_inode(cfile->dentry))->deferred_closes);
721 * Critical section which runs after acquiring deferred_lock.
724 cifs_del_deferred_close(struct cifsFileInfo *cfile)
726 bool is_deferred = false;
727 struct cifs_deferred_close *dclose;
729 is_deferred = cifs_is_deferred_close(cfile, &dclose);
732 list_del(&dclose->dlist);
737 cifs_close_deferred_file(struct cifsInodeInfo *cifs_inode)
739 struct cifsFileInfo *cfile = NULL;
740 struct file_list *tmp_list, *tmp_next_list;
741 struct list_head file_head;
743 if (cifs_inode == NULL)
746 INIT_LIST_HEAD(&file_head);
747 spin_lock(&cifs_inode->open_file_lock);
748 list_for_each_entry(cfile, &cifs_inode->openFileList, flist) {
749 if (delayed_work_pending(&cfile->deferred)) {
750 if (cancel_delayed_work(&cfile->deferred)) {
751 spin_lock(&cifs_inode->deferred_lock);
752 cifs_del_deferred_close(cfile);
753 spin_unlock(&cifs_inode->deferred_lock);
755 tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
756 if (tmp_list == NULL)
758 tmp_list->cfile = cfile;
759 list_add_tail(&tmp_list->list, &file_head);
763 spin_unlock(&cifs_inode->open_file_lock);
765 list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
766 _cifsFileInfo_put(tmp_list->cfile, false, false);
767 list_del(&tmp_list->list);
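/*
 * Editorial note: this helper and the two below share the same two-phase
 * pattern - candidate files are collected onto a private list while the
 * spinlock is held (hence GFP_ATOMIC), and the reference-dropping
 * _cifsFileInfo_put() calls happen only after the lock is released.
 */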
773 cifs_close_all_deferred_files(struct cifs_tcon *tcon)
775 struct cifsFileInfo *cfile;
776 struct file_list *tmp_list, *tmp_next_list;
777 struct list_head file_head;
779 INIT_LIST_HEAD(&file_head);
780 spin_lock(&tcon->open_file_lock);
781 list_for_each_entry(cfile, &tcon->openFileList, tlist) {
782 if (delayed_work_pending(&cfile->deferred)) {
783 if (cancel_delayed_work(&cfile->deferred)) {
784 spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
785 cifs_del_deferred_close(cfile);
786 spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
788 tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
789 if (tmp_list == NULL)
791 tmp_list->cfile = cfile;
792 list_add_tail(&tmp_list->list, &file_head);
796 spin_unlock(&tcon->open_file_lock);
798 list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
799 _cifsFileInfo_put(tmp_list->cfile, true, false);
800 list_del(&tmp_list->list);
805 cifs_close_deferred_file_under_dentry(struct cifs_tcon *tcon, const char *path)
807 struct cifsFileInfo *cfile;
808 struct file_list *tmp_list, *tmp_next_list;
809 struct list_head file_head;
811 const char *full_path;
813 INIT_LIST_HEAD(&file_head);
814 page = alloc_dentry_path();
815 spin_lock(&tcon->open_file_lock);
816 list_for_each_entry(cfile, &tcon->openFileList, tlist) {
817 full_path = build_path_from_dentry(cfile->dentry, page);
818 if (strstr(full_path, path)) {
819 if (delayed_work_pending(&cfile->deferred)) {
820 if (cancel_delayed_work(&cfile->deferred)) {
821 spin_lock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
822 cifs_del_deferred_close(cfile);
823 spin_unlock(&CIFS_I(d_inode(cfile->dentry))->deferred_lock);
825 tmp_list = kmalloc(sizeof(struct file_list), GFP_ATOMIC);
826 if (tmp_list == NULL)
828 tmp_list->cfile = cfile;
829 list_add_tail(&tmp_list->list, &file_head);
834 spin_unlock(&tcon->open_file_lock);
836 list_for_each_entry_safe(tmp_list, tmp_next_list, &file_head, list) {
837 _cifsFileInfo_put(tmp_list->cfile, true, false);
838 list_del(&tmp_list->list);
841 free_dentry_path(page);
844 /* parses DFS referral V3 structure
845 * caller is responsible for freeing target_nodes
848 * - on failure - errno
851 parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size,
852 unsigned int *num_of_nodes,
853 struct dfs_info3_param **target_nodes,
854 const struct nls_table *nls_codepage, int remap,
855 const char *searchName, bool is_unicode)
859 struct dfs_referral_level_3 *ref;
861 *num_of_nodes = le16_to_cpu(rsp->NumberOfReferrals);
863 if (*num_of_nodes < 1) {
864 cifs_dbg(VFS, "num_referrals must be greater than 0, but we got num_referrals = %d\n",
867 goto parse_DFS_referrals_exit;
870 ref = (struct dfs_referral_level_3 *) &(rsp->referrals);
871 if (ref->VersionNumber != cpu_to_le16(3)) {
872 cifs_dbg(VFS, "Referrals of V%d version are not supported, should be V3\n",
873 le16_to_cpu(ref->VersionNumber));
875 goto parse_DFS_referrals_exit;
878 /* get the upper boundary of the resp buffer */
879 data_end = (char *)rsp + rsp_size;
881 cifs_dbg(FYI, "num_referrals: %d dfs flags: 0x%x ...\n",
882 *num_of_nodes, le32_to_cpu(rsp->DFSFlags));
884 *target_nodes = kcalloc(*num_of_nodes, sizeof(struct dfs_info3_param),
886 if (*target_nodes == NULL) {
888 goto parse_DFS_referrals_exit;
891 /* collect necessary data from referrals */
892 for (i = 0; i < *num_of_nodes; i++) {
895 struct dfs_info3_param *node = (*target_nodes)+i;
897 node->flags = le32_to_cpu(rsp->DFSFlags);
899 __le16 *tmp = kmalloc(strlen(searchName)*2 + 2,
903 goto parse_DFS_referrals_exit;
905 cifsConvertToUTF16((__le16 *) tmp, searchName,
906 PATH_MAX, nls_codepage, remap);
907 node->path_consumed = cifs_utf16_bytes(tmp,
908 le16_to_cpu(rsp->PathConsumed),
912 node->path_consumed = le16_to_cpu(rsp->PathConsumed);
914 node->server_type = le16_to_cpu(ref->ServerType);
915 node->ref_flag = le16_to_cpu(ref->ReferralEntryFlags);
918 temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset);
919 max_len = data_end - temp;
920 node->path_name = cifs_strndup_from_utf16(temp, max_len,
921 is_unicode, nls_codepage);
922 if (!node->path_name) {
924 goto parse_DFS_referrals_exit;
927 /* copy link target UNC */
928 temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset);
929 max_len = data_end - temp;
930 node->node_name = cifs_strndup_from_utf16(temp, max_len,
931 is_unicode, nls_codepage);
932 if (!node->node_name) {
934 goto parse_DFS_referrals_exit;
937 node->ttl = le32_to_cpu(ref->TimeToLive);
942 parse_DFS_referrals_exit:
944 free_dfs_info_array(*target_nodes, *num_of_nodes);
945 *target_nodes = NULL;
951 struct cifs_aio_ctx *
952 cifs_aio_ctx_alloc(void)
954 struct cifs_aio_ctx *ctx;
957 * Must use kzalloc to initialize ctx->bv to NULL and ctx->direct_io
958 * to false so that we know when we have to unreference pages within
959 * cifs_aio_ctx_release()
961 ctx = kzalloc(sizeof(struct cifs_aio_ctx), GFP_KERNEL);
965 INIT_LIST_HEAD(&ctx->list);
966 mutex_init(&ctx->aio_mutex);
967 init_completion(&ctx->done);
968 kref_init(&ctx->refcount);
973 cifs_aio_ctx_release(struct kref *refcount)
975 struct cifs_aio_ctx *ctx = container_of(refcount,
976 struct cifs_aio_ctx, refcount);
978 cifsFileInfo_put(ctx->cfile);
981 * ctx->bv is only set if setup_aio_ctx_iter() was called successfully,
982 * which means that iov_iter_get_pages() was a success and thus that
983 * we have taken a reference on the pages.
988 for (i = 0; i < ctx->npages; i++) {
989 if (ctx->should_dirty)
990 set_page_dirty(ctx->bv[i].bv_page);
991 put_page(ctx->bv[i].bv_page);
999 #define CIFS_AIO_KMALLOC_LIMIT (1024 * 1024)
1002 setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw)
1005 unsigned int cur_npages;
1006 unsigned int npages = 0;
1009 size_t count = iov_iter_count(iter);
1010 unsigned int saved_len;
1012 unsigned int max_pages = iov_iter_npages(iter, INT_MAX);
1013 struct page **pages = NULL;
1014 struct bio_vec *bv = NULL;
1016 if (iov_iter_is_kvec(iter)) {
1017 memcpy(&ctx->iter, iter, sizeof(*iter));
1019 iov_iter_advance(iter, count);
1023 if (array_size(max_pages, sizeof(*bv)) <= CIFS_AIO_KMALLOC_LIMIT)
1024 bv = kmalloc_array(max_pages, sizeof(*bv), GFP_KERNEL);
1027 bv = vmalloc(array_size(max_pages, sizeof(*bv)));
1032 if (array_size(max_pages, sizeof(*pages)) <= CIFS_AIO_KMALLOC_LIMIT)
1033 pages = kmalloc_array(max_pages, sizeof(*pages), GFP_KERNEL);
1036 pages = vmalloc(array_size(max_pages, sizeof(*pages)));
1045 while (count && npages < max_pages) {
1046 rc = iov_iter_get_pages2(iter, pages, count, max_pages, &start);
1048 cifs_dbg(VFS, "Couldn't get user pages (rc=%zd)\n", rc);
1053 cifs_dbg(VFS, "get pages rc=%zd more than %zu\n", rc,
1060 cur_npages = DIV_ROUND_UP(rc, PAGE_SIZE);
1062 if (npages + cur_npages > max_pages) {
1063 cifs_dbg(VFS, "out of vec array capacity (%u vs %u)\n",
1064 npages + cur_npages, max_pages);
1068 for (i = 0; i < cur_npages; i++) {
1069 len = rc > PAGE_SIZE ? PAGE_SIZE : rc;
1070 bv[npages + i].bv_page = pages[i];
1071 bv[npages + i].bv_offset = start;
1072 bv[npages + i].bv_len = len - start;
1077 npages += cur_npages;
1082 ctx->len = saved_len - count;
1083 ctx->npages = npages;
1084 iov_iter_bvec(&ctx->iter, rw, ctx->bv, npages, ctx->len);
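/*
 * Editorial note: the bv and pages arrays above are allocated with
 * kmalloc_array() while their size stays within CIFS_AIO_KMALLOC_LIMIT and
 * fall back to vmalloc() for larger requests, since large physically
 * contiguous allocations are less likely to succeed.
 */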
1089 * cifs_alloc_hash - allocate hash and hash context together
1090 * @name: The name of the crypto hash algo
1091 * @sdesc: SHASH descriptor where to put the pointer to the hash TFM
1093 * The caller has to make sure @sdesc is initialized to either NULL or
1094 * a valid context. It can be freed via cifs_free_hash().
1097 cifs_alloc_hash(const char *name, struct shash_desc **sdesc)
1100 struct crypto_shash *alg = NULL;
1105 alg = crypto_alloc_shash(name, 0, 0);
1107 cifs_dbg(VFS, "Could not allocate shash TFM '%s'\n", name);
1113 *sdesc = kmalloc(sizeof(struct shash_desc) + crypto_shash_descsize(alg), GFP_KERNEL);
1114 if (*sdesc == NULL) {
1115 cifs_dbg(VFS, "no memory left to allocate shash TFM '%s'\n", name);
1116 crypto_free_shash(alg);
1120 (*sdesc)->tfm = alg;
1125 * cifs_free_hash - free hash and hash context together
1126 * @sdesc: Where to find the pointer to the hash TFM
1128 * Freeing a NULL descriptor is safe.
1131 cifs_free_hash(struct shash_desc **sdesc)
1133 if (unlikely(!sdesc) || !*sdesc)
1136 if ((*sdesc)->tfm) {
1137 crypto_free_shash((*sdesc)->tfm);
1138 (*sdesc)->tfm = NULL;
1141 kfree_sensitive(*sdesc);
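/*
 * Illustrative usage sketch (editorial addition), assuming the "md5" shash
 * is available in the running kernel:
 *
 *	struct shash_desc *md5 = NULL;
 *	u8 digest[16];
 *
 *	rc = cifs_alloc_hash("md5", &md5);
 *	if (!rc) {
 *		crypto_shash_init(md5);
 *		crypto_shash_update(md5, data, len);
 *		crypto_shash_final(md5, digest);
 *		cifs_free_hash(&md5);
 *	}
 */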
1146 * rqst_page_get_length - obtain the length and offset for a page in smb_rqst
1147 * @rqst: The request descriptor
1148 * @page: The index of the page to query
1149 * @len: Where to store the length for this page
1150 * @offset: Where to store the offset for this page
1152 void rqst_page_get_length(const struct smb_rqst *rqst, unsigned int page,
1153 unsigned int *len, unsigned int *offset)
1155 *len = rqst->rq_pagesz;
1156 *offset = (page == 0) ? rqst->rq_offset : 0;
1158 if (rqst->rq_npages == 1 || page == rqst->rq_npages-1)
1159 *len = rqst->rq_tailsz;
1161 *len = rqst->rq_pagesz - rqst->rq_offset;
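/*
 * Worked example (editorial addition): with rq_npages == 3, rq_offset == 512,
 * rq_pagesz == 4096 and rq_tailsz == 100, the helper yields
 *	page 0: offset 512, len 3584
 *	page 1: offset 0,   len 4096
 *	page 2: offset 0,   len 100
 */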
1164 void extract_unc_hostname(const char *unc, const char **h, size_t *len)
1168 /* skip initial slashes */
1169 while (*unc && (*unc == '\\' || *unc == '/'))
1174 while (*end && !(*end == '\\' || *end == '/'))
1182 * copy_path_name - copy src path to dst, possibly truncating
1183 * @dst: The destination buffer
1184 * @src: The source name
1186 * returns number of bytes written (including trailing nul)
1188 int copy_path_name(char *dst, const char *src)
1193 * PATH_MAX includes nul, so if strlen(src) >= PATH_MAX it
1194 * will truncate and strlen(dst) will be PATH_MAX-1
1196 name_len = strscpy(dst, src, PATH_MAX);
1197 if (WARN_ON_ONCE(name_len < 0))
1198 name_len = PATH_MAX-1;
1200 /* we count the trailing nul */
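/*
 * Worked example (editorial addition): copy_path_name(dst, "\\server\share")
 * copies 14 characters plus the trailing nul and returns 15; a source longer
 * than PATH_MAX-1 is truncated and PATH_MAX is returned.
 */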
1205 struct super_cb_data {
1207 struct super_block *sb;
1210 static void tcp_super_cb(struct super_block *sb, void *arg)
1212 struct super_cb_data *sd = arg;
1213 struct TCP_Server_Info *server = sd->data;
1214 struct cifs_sb_info *cifs_sb;
1215 struct cifs_tcon *tcon;
1220 cifs_sb = CIFS_SB(sb);
1221 tcon = cifs_sb_master_tcon(cifs_sb);
1222 if (tcon->ses->server == server)
1226 static struct super_block *__cifs_get_super(void (*f)(struct super_block *, void *),
1229 struct super_cb_data sd = {
1233 struct file_system_type **fs_type = (struct file_system_type *[]) {
1234 &cifs_fs_type, &smb3_fs_type, NULL,
1237 for (; *fs_type; fs_type++) {
1238 iterate_supers_type(*fs_type, f, &sd);
1241 * Grab an active reference in order to prevent automounts (DFS links)
1242 * from expiring and then freeing up our cifs superblock pointer while
1243 * we're doing failover.
1245 cifs_sb_active(sd.sb);
1249 return ERR_PTR(-EINVAL);
1252 static void __cifs_put_super(struct super_block *sb)
1254 if (!IS_ERR_OR_NULL(sb))
1255 cifs_sb_deactive(sb);
1258 struct super_block *cifs_get_tcp_super(struct TCP_Server_Info *server)
1260 return __cifs_get_super(tcp_super_cb, server);
1263 void cifs_put_tcp_super(struct super_block *sb)
1265 __cifs_put_super(sb);
1268 #ifdef CONFIG_CIFS_DFS_UPCALL
1269 int match_target_ip(struct TCP_Server_Info *server,
1270 const char *share, size_t share_len,
1274 char *target, *tip = NULL;
1275 struct sockaddr tipaddr;
1279 target = kzalloc(share_len + 3, GFP_KERNEL);
1285 scnprintf(target, share_len + 3, "\\\\%.*s", (int)share_len, share);
1287 cifs_dbg(FYI, "%s: target name: %s\n", __func__, target + 2);
1289 rc = dns_resolve_server_name_to_ip(target, &tip, NULL);
1293 cifs_dbg(FYI, "%s: target ip: %s\n", __func__, tip);
1295 if (!cifs_convert_address(&tipaddr, tip, strlen(tip))) {
1296 cifs_dbg(VFS, "%s: failed to convert target ip address\n",
1302 *result = cifs_match_ipaddr((struct sockaddr *)&server->dstaddr,
1304 cifs_dbg(FYI, "%s: ip addresses match: %u\n", __func__, *result);
1314 int cifs_update_super_prepath(struct cifs_sb_info *cifs_sb, char *prefix)
1316 kfree(cifs_sb->prepath);
1318 if (prefix && *prefix) {
1319 cifs_sb->prepath = cifs_sanitize_prepath(prefix, GFP_ATOMIC);
1320 if (!cifs_sb->prepath)
1323 convert_delimiter(cifs_sb->prepath, CIFS_DIR_SEP(cifs_sb));
1325 cifs_sb->prepath = NULL;
1327 cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
1332 * Handle weird Windows SMB server behaviour. It responds with
1333 * STATUS_OBJECT_NAME_INVALID code to SMB2 QUERY_INFO request for
1334 * "\<server>\<dfsname>\<linkpath>" DFS reference, where <dfsname> contains
1335 * non-ASCII unicode symbols.
1337 int cifs_inval_name_dfs_link_error(const unsigned int xid,
1338 struct cifs_tcon *tcon,
1339 struct cifs_sb_info *cifs_sb,
1340 const char *full_path,
1343 struct cifs_ses *ses = tcon->ses;
1351 * Fast path - skip check when @full_path doesn't have a prefix path to
1352 * look up or tcon is not DFS.
1354 if (strlen(full_path) < 2 || !cifs_sb ||
1355 (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) ||
1356 !is_tcon_dfs(tcon) || !ses->server->origin_fullpath)
1360 * Slow path - tcon is DFS and @full_path has prefix path, so attempt
1361 * to get a referral to figure out whether it is a DFS link.
1363 len = strnlen(tcon->tree_name, MAX_TREE_SIZE + 1) + strlen(full_path) + 1;
1364 path = kmalloc(len, GFP_KERNEL);
1368 scnprintf(path, len, "%s%s", tcon->tree_name, full_path);
1369 ref_path = dfs_cache_canonical_path(path + 1, cifs_sb->local_nls,
1370 cifs_remap(cifs_sb));
1373 if (IS_ERR(ref_path)) {
1374 if (PTR_ERR(ref_path) != -EINVAL)
1375 return PTR_ERR(ref_path);
1377 struct dfs_info3_param *refs = NULL;
1381 * XXX: we are not using dfs_cache_find() here because we might
1382 * end up filling all the DFS cache and thus potentially
1383 * removing cached DFS targets that the client would eventually
1384 * need during failover.
1386 if (ses->server->ops->get_dfs_refer &&
1387 !ses->server->ops->get_dfs_refer(xid, ses, ref_path, &refs,
1388 &num_refs, cifs_sb->local_nls,
1389 cifs_remap(cifs_sb)))
1390 *islink = refs[0].server_type == DFS_TYPE_LINK;
1391 free_dfs_info_array(refs, num_refs);
1398 int cifs_wait_for_server_reconnect(struct TCP_Server_Info *server, bool retry)
1403 spin_lock(&server->srv_lock);
1404 if (server->tcpStatus != CifsNeedReconnect) {
1405 spin_unlock(&server->srv_lock);
1408 timeout *= server->nr_targets;
1409 spin_unlock(&server->srv_lock);
1412 * Give the demultiplex thread up to 10 seconds for each target available for
1413 * reconnect -- should be greater than cifs socket timeout which is 7
1416 * On "soft" mounts we wait once. Hard mounts keep retrying until
1417 * process is killed or server comes back on-line.
1420 rc = wait_event_interruptible_timeout(server->response_q,
1421 (server->tcpStatus != CifsNeedReconnect),
1424 cifs_dbg(FYI, "%s: aborting reconnect due to received signal\n",
1426 return -ERESTARTSYS;
1429 /* are we still trying to reconnect? */
1430 spin_lock(&server->srv_lock);
1431 if (server->tcpStatus != CifsNeedReconnect) {
1432 spin_unlock(&server->srv_lock);
1435 spin_unlock(&server->srv_lock);
1438 cifs_dbg(FYI, "%s: gave up waiting on reconnect\n", __func__);