// SPDX-License-Identifier: GPL-2.0
/*
 * Functions to handle the cached directory entries
 *
 * Copyright (c) 2022, Ronnie Sahlberg <lsahlber@redhat.com>
 */

#include <linux/namei.h>
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "cached_dir.h"

static struct cached_fid *init_cached_dir(const char *path);
static void free_cached_dir(struct cached_fid *cfid);
static void smb2_close_cached_fid(struct kref *ref);
static void cfids_laundromat_worker(struct work_struct *work);

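/*
 * Find the cached directory handle for @path and take a reference on
 * it, or, if none exists and @lookup_only is false, allocate and link
 * a new entry while there is still room in the list.  Returns NULL on
 * failure; callers drop their reference with kref_put().
 */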
static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
						    const char *path,
						    bool lookup_only,
						    __u32 max_cached_dirs)
{
	struct cached_fid *cfid;

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (!strcmp(cfid->path, path)) {
			/*
			 * If it doesn't have a lease it is either not yet
			 * fully cached or it may be in the process of
			 * being deleted due to a lease break.
			 */
			if (!cfid->time || !cfid->has_lease) {
				spin_unlock(&cfids->cfid_list_lock);
				return NULL;
			}
			kref_get(&cfid->refcount);
			spin_unlock(&cfids->cfid_list_lock);
			return cfid;
		}
	}
	if (lookup_only) {
		spin_unlock(&cfids->cfid_list_lock);
		return NULL;
	}
	if (cfids->num_entries >= max_cached_dirs) {
		spin_unlock(&cfids->cfid_list_lock);
		return NULL;
	}
	cfid = init_cached_dir(path);
	if (cfid == NULL) {
		spin_unlock(&cfids->cfid_list_lock);
		return NULL;
	}
	cfid->cfids = cfids;
	cfids->num_entries++;
	list_add(&cfid->entry, &cfids->entries);
	cfid->on_list = true;
	kref_get(&cfid->refcount);
	spin_unlock(&cfids->cfid_list_lock);
	return cfid;
}

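/*
 * Walk @path component by component, starting from the superblock root,
 * and return the dentry for the final component (or an ERR_PTR).
 */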
static struct dentry *
path_to_dentry(struct cifs_sb_info *cifs_sb, const char *path)
{
	struct dentry *dentry;
	const char *s, *p;
	char sep;

	sep = CIFS_DIR_SEP(cifs_sb);
	dentry = dget(cifs_sb->root);
	s = path;

	do {
		struct inode *dir = d_inode(dentry);
		struct dentry *child;

		if (!S_ISDIR(dir->i_mode)) {
			dput(dentry);
			dentry = ERR_PTR(-ENOTDIR);
			break;
		}

		/* skip separators */
		while (*s == sep)
			s++;
		if (!*s)
			break;
		p = s++;
		/* next separator */
		while (*s && *s != sep)
			s++;

		child = lookup_positive_unlocked(p, dentry, s - p);
		dput(dentry);
		dentry = child;
	} while (!IS_ERR(dentry));
	return dentry;
}

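/* Return @path with any mount prefix path (cifs_sb->prepath) skipped. */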
static const char *path_no_prefix(struct cifs_sb_info *cifs_sb,
				  const char *path)
{
	size_t len = 0;

	if (!*path)
		return path;

	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
	    cifs_sb->prepath) {
		len = strlen(cifs_sb->prepath) + 1;
		if (unlikely(len > strlen(path)))
			return ERR_PTR(-EINVAL);
	}
	return path + len;
}

/*
 * Open and cache a directory handle.
 * If error then *cfid is not initialized.
 */
int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
		    const char *path,
		    struct cifs_sb_info *cifs_sb,
		    bool lookup_only, struct cached_fid **ret_cfid)
{
	struct cifs_ses *ses;
	struct TCP_Server_Info *server;
	struct cifs_open_parms oparms;
	struct smb2_create_rsp *o_rsp = NULL;
	struct smb2_query_info_rsp *qi_rsp = NULL;
	int resp_buftype[2];
	struct smb_rqst rqst[2];
	struct kvec rsp_iov[2];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qi_iov[1];
	int rc, flags = 0;
	__le16 *utf16_path = NULL;
	u8 oplock = SMB2_OPLOCK_LEVEL_II;
	struct cifs_fid *pfid;
	struct dentry *dentry = NULL;
	struct cached_fid *cfid;
	struct cached_fids *cfids;
	const char *npath;
	int retries = 0, cur_sleep = 1;

	if (tcon == NULL || tcon->cfids == NULL || tcon->nohandlecache ||
	    is_smb1_server(tcon->ses->server) || (dir_cache_timeout == 0))
		return -EOPNOTSUPP;

	ses = tcon->ses;
	cfids = tcon->cfids;

	if (cifs_sb->root == NULL)
		return -ENOENT;

replay_again:
	/* reinitialize for possible replay */
	flags = 0;
	oplock = SMB2_OPLOCK_LEVEL_II;
	server = cifs_pick_channel(ses);

	if (!server->ops->new_lease_key)
		return -EIO;

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	cfid = find_or_create_cached_dir(cfids, path, lookup_only, tcon->max_cached_dirs);
	if (cfid == NULL) {
		kfree(utf16_path);
		return -ENOENT;
	}
	/*
	 * Return the cached fid if it has a lease.  Otherwise, it is either a
	 * new entry or the laundromat worker removed it from @cfids->entries.
	 * Caller will put the last reference if the latter.
	 */
	spin_lock(&cfids->cfid_list_lock);
	if (cfid->has_lease) {
		spin_unlock(&cfids->cfid_list_lock);
		*ret_cfid = cfid;
		kfree(utf16_path);
		return 0;
	}
	spin_unlock(&cfids->cfid_list_lock);

	/*
	 * Skip any prefix paths in @path as lookup_positive_unlocked() ends up
	 * calling ->lookup() which already adds those through
	 * build_path_from_dentry().  Also, do it earlier as we might reconnect
	 * below when trying to send compounded request and then potentially
	 * having a different prefix path (e.g. after DFS failover).
	 */
	npath = path_no_prefix(cifs_sb, path);
	if (IS_ERR(npath)) {
		rc = PTR_ERR(npath);
		goto out;
	}

	if (!npath[0]) {
		dentry = dget(cifs_sb->root);
	} else {
		dentry = path_to_dentry(cifs_sb, npath);
		if (IS_ERR(dentry)) {
			rc = -ENOENT;
			goto out;
		}
	}
	cfid->dentry = dentry;
	cfid->tcon = tcon;

	/*
	 * We do not hold the lock for the open because SMB2_open
	 * may need to reconnect.
	 * This is safe because no other thread will be able to get a ref
	 * to the cfid until we have finished opening the file and (possibly)
	 * acquired a lease.
	 */
	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	pfid = &cfid->fid;
	server->ops->new_lease_key(pfid);

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.path = path,
		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE),
		.desired_access = FILE_READ_DATA | FILE_READ_ATTRIBUTES |
				  FILE_READ_EA,
		.disposition = FILE_OPEN,
		.fid = pfid,
		.replay = !!(retries),
	};

	rc = SMB2_open_init(tcon, server,
			    &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto oshr_free;
	smb2_set_next_command(tcon, &rqst[0]);

	memset(&qi_iov, 0, sizeof(qi_iov));
	rqst[1].rq_iov = qi_iov;
	rqst[1].rq_nvec = 1;

	rc = SMB2_query_info_init(tcon, server,
				  &rqst[1], COMPOUND_FID,
				  COMPOUND_FID, FILE_ALL_INFORMATION,
				  SMB2_O_INFO_FILE, 0,
				  sizeof(struct smb2_file_all_info) +
				  PATH_MAX * 2, 0, NULL);
	if (rc)
		goto oshr_free;

	smb2_set_related(&rqst[1]);

	/*
	 * Set @cfid->has_lease to true before sending out compounded request so
	 * its lease reference can be put in cached_dir_lease_break() due to a
	 * potential lease break right after the request is sent or while @cfid
	 * is still being cached.  Concurrent processes won't be able to use it
	 * yet due to @cfid->time being zero.
	 */
	cfid->has_lease = true;

	if (retries) {
		smb2_set_replay(server, &rqst[0]);
		smb2_set_replay(server, &rqst[1]);
	}

	rc = compound_send_recv(xid, ses, server,
				flags, 2, rqst,
				resp_buftype, rsp_iov);
	if (rc) {
		if (rc == -EREMCHG) {
			tcon->need_reconnect = true;
			pr_warn_once("server share %s deleted\n",
				     tcon->tree_name);
		}
		goto oshr_free;
	}
	cfid->is_open = true;

	spin_lock(&cfids->cfid_list_lock);

	o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
	oparms.fid->persistent_fid = o_rsp->PersistentFileId;
	oparms.fid->volatile_fid = o_rsp->VolatileFileId;
#ifdef CONFIG_CIFS_DEBUG2
	oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId);
#endif /* CIFS_DEBUG2 */

	if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE) {
		spin_unlock(&cfids->cfid_list_lock);
		rc = -EINVAL;
		goto oshr_free;
	}

	rc = smb2_parse_contexts(server, rsp_iov,
				 &oparms.fid->epoch,
				 oparms.fid->lease_key,
				 &oplock, NULL, NULL);
	if (rc) {
		spin_unlock(&cfids->cfid_list_lock);
		goto oshr_free;
	}

	rc = -EINVAL;
	if (!(oplock & SMB2_LEASE_READ_CACHING_HE)) {
		spin_unlock(&cfids->cfid_list_lock);
		goto oshr_free;
	}
	qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
	if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info)) {
		spin_unlock(&cfids->cfid_list_lock);
		goto oshr_free;
	}
	if (!smb2_validate_and_copy_iov(
				le16_to_cpu(qi_rsp->OutputBufferOffset),
				sizeof(struct smb2_file_all_info),
				&rsp_iov[1], sizeof(struct smb2_file_all_info),
				(char *)&cfid->file_all_info))
		cfid->file_all_info_is_valid = true;

	cfid->time = jiffies;
	spin_unlock(&cfids->cfid_list_lock);
	/* At this point the directory handle is fully cached */
	rc = 0;

oshr_free:
	SMB2_open_free(&rqst[0]);
	SMB2_query_info_free(&rqst[1]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	if (rc) {
		spin_lock(&cfids->cfid_list_lock);
		if (cfid->on_list) {
			list_del(&cfid->entry);
			cfid->on_list = false;
			cfids->num_entries--;
		}
		if (cfid->has_lease) {
			/*
			 * We are guaranteed to have two references at this
			 * point.  One for the caller and one for a potential
			 * lease.  Release the lease reference so that the
			 * directory will be closed when the caller closes
			 * the cached handle.
			 */
			cfid->has_lease = false;
			spin_unlock(&cfids->cfid_list_lock);
			kref_put(&cfid->refcount, smb2_close_cached_fid);
			goto out;
		}
		spin_unlock(&cfids->cfid_list_lock);
	}
out:
	if (rc) {
		if (cfid->is_open)
			SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
				   cfid->fid.volatile_fid);
		free_cached_dir(cfid);
	} else {
		*ret_cfid = cfid;
		atomic_inc(&tcon->num_remote_opens);
	}
	kfree(utf16_path);

	if (is_replayable_error(rc) &&
	    smb2_should_replay(tcon, &retries, &cur_sleep))
		goto replay_again;

	return rc;
}

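/*
 * A minimal usage sketch (hypothetical caller; error handling elided).
 * Callers such as the readdir and query-info paths obtain the handle,
 * compound their own requests against cfid->fid, then drop it:
 *
 *	struct cached_fid *cfid;
 *
 *	if (open_cached_dir(xid, tcon, path, cifs_sb, false, &cfid) == 0) {
 *		// ... use cfid->fid.persistent_fid / volatile_fid ...
 *		close_cached_dir(cfid);
 *	}
 */
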
int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
			      struct dentry *dentry,
			      struct cached_fid **ret_cfid)
{
	struct cached_fid *cfid;
	struct cached_fids *cfids = tcon->cfids;

	if (cfids == NULL)
		return -ENOENT;

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (dentry && cfid->dentry == dentry) {
			cifs_dbg(FYI, "found a cached root file handle by dentry\n");
			kref_get(&cfid->refcount);
			*ret_cfid = cfid;
			spin_unlock(&cfids->cfid_list_lock);
			return 0;
		}
	}
	spin_unlock(&cfids->cfid_list_lock);
	return -ENOENT;
}

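/*
 * kref release callback: unlink the entry from its list, close the
 * directory handle on the server if it is still open, and free the
 * cached_fid.
 */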
static void
smb2_close_cached_fid(struct kref *ref)
{
	struct cached_fid *cfid = container_of(ref, struct cached_fid,
					       refcount);
	int rc;

	spin_lock(&cfid->cfids->cfid_list_lock);
	if (cfid->on_list) {
		list_del(&cfid->entry);
		cfid->on_list = false;
		cfid->cfids->num_entries--;
	}
	spin_unlock(&cfid->cfids->cfid_list_lock);

	dput(cfid->dentry);
	cfid->dentry = NULL;

	if (cfid->is_open) {
		rc = SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
				cfid->fid.volatile_fid);
		if (rc) /* should we retry on -EBUSY or -EAGAIN? */
			cifs_dbg(VFS, "close cached dir rc %d\n", rc);
	}

	free_cached_dir(cfid);
}

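/*
 * Drop the cached directory handle for @name (e.g. ahead of an rmdir)
 * so the handle cache does not keep the directory open on the server.
 */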
void drop_cached_dir_by_name(const unsigned int xid, struct cifs_tcon *tcon,
			     const char *name, struct cifs_sb_info *cifs_sb)
{
	struct cached_fid *cfid = NULL;
	int rc;

	rc = open_cached_dir(xid, tcon, name, cifs_sb, true, &cfid);
	if (rc) {
		cifs_dbg(FYI, "no cached dir found for rmdir(%s)\n", name);
		return;
	}
	spin_lock(&cfid->cfids->cfid_list_lock);
	if (cfid->has_lease) {
		cfid->has_lease = false;
		kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
	spin_unlock(&cfid->cfids->cfid_list_lock);
	close_cached_dir(cfid);
}

void close_cached_dir(struct cached_fid *cfid)
{
	kref_put(&cfid->refcount, smb2_close_cached_fid);
}

/*
 * Called from cifs_kill_sb when we unmount a share
 */
void close_all_cached_dirs(struct cifs_sb_info *cifs_sb)
{
	struct rb_root *root = &cifs_sb->tlink_tree;
	struct rb_node *node;
	struct cached_fid *cfid;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cached_fids *cfids;

	for (node = rb_first(root); node; node = rb_next(node)) {
		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
		tcon = tlink_tcon(tlink);
		if (IS_ERR(tcon))
			continue;
		cfids = tcon->cfids;
		if (cfids == NULL)
			continue;
		list_for_each_entry(cfid, &cfids->entries, entry) {
			dput(cfid->dentry);
			cfid->dentry = NULL;
		}
	}
}

/*
 * Invalidate all cached dirs when a TCON has been reset
 * due to a session loss.
 */
void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
{
	struct cached_fids *cfids = tcon->cfids;
	struct cached_fid *cfid, *q;
	LIST_HEAD(entry);

	if (cfids == NULL)
		return;

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
		list_move(&cfid->entry, &entry);
		cfids->num_entries--;
		cfid->is_open = false;
		cfid->on_list = false;
		/* To prevent race with smb2_cached_lease_break() */
		kref_get(&cfid->refcount);
	}
	spin_unlock(&cfids->cfid_list_lock);

	list_for_each_entry_safe(cfid, q, &entry, entry) {
		list_del(&cfid->entry);
		cancel_work_sync(&cfid->lease_break);
		if (cfid->has_lease) {
			/*
			 * The lease was never cancelled from the server,
			 * so we need to drop the reference.
			 */
			spin_lock(&cfids->cfid_list_lock);
			cfid->has_lease = false;
			spin_unlock(&cfids->cfid_list_lock);
			kref_put(&cfid->refcount, smb2_close_cached_fid);
		}
		/* Drop the extra reference opened above */
		kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
}

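/*
 * Worker for a server-initiated lease break: clear @cfid->has_lease and
 * put the lease reference, which may be the final one.
 */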
static void
smb2_cached_lease_break(struct work_struct *work)
{
	struct cached_fid *cfid = container_of(work,
				struct cached_fid, lease_break);

	spin_lock(&cfid->cfids->cfid_list_lock);
	cfid->has_lease = false;
	spin_unlock(&cfid->cfids->cfid_list_lock);
	kref_put(&cfid->refcount, smb2_close_cached_fid);
}

int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16])
{
	struct cached_fids *cfids = tcon->cfids;
	struct cached_fid *cfid;

	if (cfids == NULL)
		return false;

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (cfid->has_lease &&
		    !memcmp(lease_key,
			    cfid->fid.lease_key,
			    SMB2_LEASE_KEY_SIZE)) {
			cfid->time = 0;
			/*
			 * We found a lease; remove it from the list
			 * so no threads can access it.
			 */
			list_del(&cfid->entry);
			cfid->on_list = false;
			cfids->num_entries--;

			queue_work(cifsiod_wq,
				   &cfid->lease_break);
			spin_unlock(&cfids->cfid_list_lock);
			return true;
		}
	}
	spin_unlock(&cfids->cfid_list_lock);
	return false;
}

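/*
 * Allocate and initialize a new cached_fid for @path.  Called with
 * cfid_list_lock held, hence the GFP_ATOMIC allocations.
 */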
static struct cached_fid *init_cached_dir(const char *path)
{
	struct cached_fid *cfid;

	cfid = kzalloc(sizeof(*cfid), GFP_ATOMIC);
	if (!cfid)
		return NULL;
	cfid->path = kstrdup(path, GFP_ATOMIC);
	if (!cfid->path) {
		kfree(cfid);
		return NULL;
	}

	INIT_WORK(&cfid->lease_break, smb2_cached_lease_break);
	INIT_LIST_HEAD(&cfid->entry);
	INIT_LIST_HEAD(&cfid->dirents.entries);
	mutex_init(&cfid->dirents.de_mutex);
	spin_lock_init(&cfid->fid_lock);
	kref_init(&cfid->refcount);
	return cfid;
}

static void free_cached_dir(struct cached_fid *cfid)
{
	struct cached_dirent *dirent, *q;

	dput(cfid->dentry);
	cfid->dentry = NULL;

	/*
	 * Delete all cached dirent names
	 */
	list_for_each_entry_safe(dirent, q, &cfid->dirents.entries, entry) {
		list_del(&dirent->entry);
		kfree(dirent->name);
		kfree(dirent);
	}

	kfree(cfid->path);
	cfid->path = NULL;
	kfree(cfid);
}

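/*
 * Laundromat worker: periodically close cached handles whose lease has
 * been held longer than dir_cache_timeout seconds, then re-arm itself.
 */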
static void cfids_laundromat_worker(struct work_struct *work)
{
	struct cached_fids *cfids;
	struct cached_fid *cfid, *q;
	LIST_HEAD(entry);

	cfids = container_of(work, struct cached_fids, laundromat_work.work);

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
		if (cfid->time &&
		    time_after(jiffies, cfid->time + HZ * dir_cache_timeout)) {
			cfid->on_list = false;
			list_move(&cfid->entry, &entry);
			cfids->num_entries--;
			/* To prevent race with smb2_cached_lease_break() */
			kref_get(&cfid->refcount);
		}
	}
	spin_unlock(&cfids->cfid_list_lock);

	list_for_each_entry_safe(cfid, q, &entry, entry) {
		list_del(&cfid->entry);
		/*
		 * Cancel and wait for the work to finish in case we are racing
		 * with it.
		 */
		cancel_work_sync(&cfid->lease_break);
		if (cfid->has_lease) {
			/*
			 * Our lease has not yet been cancelled from the server
			 * so we need to drop the reference.
			 */
			spin_lock(&cfids->cfid_list_lock);
			cfid->has_lease = false;
			spin_unlock(&cfids->cfid_list_lock);
			kref_put(&cfid->refcount, smb2_close_cached_fid);
		}
		/* Drop the extra reference opened above */
		kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
	queue_delayed_work(cifsiod_wq, &cfids->laundromat_work,
			   dir_cache_timeout * HZ);
}

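/* Allocate the per-tcon cached_fids container and start its laundromat. */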
struct cached_fids *init_cached_dirs(void)
{
	struct cached_fids *cfids;

	cfids = kzalloc(sizeof(*cfids), GFP_KERNEL);
	if (cfids == NULL)
		return NULL;
	spin_lock_init(&cfids->cfid_list_lock);
	INIT_LIST_HEAD(&cfids->entries);

	INIT_DELAYED_WORK(&cfids->laundromat_work, cfids_laundromat_worker);
	queue_delayed_work(cifsiod_wq, &cfids->laundromat_work,
			   dir_cache_timeout * HZ);

	return cfids;
}

/*
 * Called from tconInfoFree when we are tearing down the tcon.
 * There are no active users or open files/directories at this point.
 */
void free_cached_dirs(struct cached_fids *cfids)
{
	struct cached_fid *cfid, *q;
	LIST_HEAD(entry);

	if (cfids == NULL)
		return;

	cancel_delayed_work_sync(&cfids->laundromat_work);

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
		cfid->on_list = false;
		cfid->is_open = false;
		list_move(&cfid->entry, &entry);
	}
	spin_unlock(&cfids->cfid_list_lock);

	list_for_each_entry_safe(cfid, q, &entry, entry) {
		list_del(&cfid->entry);
		free_cached_dir(cfid);
	}

	kfree(cfids);
}