// SPDX-License-Identifier: GPL-2.0
/*
 *  Functions to handle the cached directory entries
 *
 *  Copyright (c) 2022, Ronnie Sahlberg <lsahlber@redhat.com>
 */

#include <linux/namei.h>
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "cached_dir.h"

static struct cached_fid *init_cached_dir(const char *path);
static void free_cached_dir(struct cached_fid *cfid);
static void smb2_close_cached_fid(struct kref *ref);
static void cfids_laundromat_worker(struct work_struct *work);

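/*
 * Find the cached handle for @path in @cfids and take a reference on it,
 * or, unless @lookup_only is set, allocate and insert a new entry (capped
 * at @max_cached_dirs).  Returns NULL if an entry exists but is not yet
 * fully cached or is being torn down after a lease break; the caller owns
 * the reference on the returned cfid.
 */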
static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
						    const char *path,
						    bool lookup_only,
						    __u32 max_cached_dirs)
{
	struct cached_fid *cfid;

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (!strcmp(cfid->path, path)) {
			/*
			 * If it doesn't have a lease it is either not yet
			 * fully cached or it may be in the process of
			 * being deleted due to a lease break.
			 */
			if (!cfid->time || !cfid->has_lease) {
				spin_unlock(&cfids->cfid_list_lock);
				return NULL;
			}
			kref_get(&cfid->refcount);
			spin_unlock(&cfids->cfid_list_lock);
			return cfid;
		}
	}
	if (lookup_only) {
		spin_unlock(&cfids->cfid_list_lock);
		return NULL;
	}
	if (cfids->num_entries >= max_cached_dirs) {
		spin_unlock(&cfids->cfid_list_lock);
		return NULL;
	}
	cfid = init_cached_dir(path);
	if (cfid == NULL) {
		spin_unlock(&cfids->cfid_list_lock);
		return NULL;
	}
	cfid->cfids = cfids;
	cfids->num_entries++;
	list_add(&cfid->entry, &cfids->entries);
	cfid->on_list = true;
	kref_get(&cfid->refcount);
	spin_unlock(&cfids->cfid_list_lock);
	return cfid;
}

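/*
 * Walk @path one component at a time, starting at the mounted root, and
 * return a referenced dentry or an ERR_PTR.  Each component is resolved
 * with lookup_positive_unlocked(), so a negative intermediate dentry
 * fails the walk.
 */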
static struct dentry *
path_to_dentry(struct cifs_sb_info *cifs_sb, const char *path)
{
	struct dentry *dentry;
	const char *s, *p;
	char sep;

	sep = CIFS_DIR_SEP(cifs_sb);
	dentry = dget(cifs_sb->root);
	s = path;

	do {
		struct inode *dir = d_inode(dentry);
		struct dentry *child;

		if (!S_ISDIR(dir->i_mode)) {
			dput(dentry);
			dentry = ERR_PTR(-ENOTDIR);
			break;
		}

		/* skip separators */
		while (*s == sep)
			s++;
		if (!*s)
			break;
		p = s++;
		/* next separator */
		while (*s && *s != sep)
			s++;

		child = lookup_positive_unlocked(p, dentry, s - p);
		dput(dentry);
		dentry = child;
	} while (!IS_ERR(dentry));
	return dentry;
}

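/*
 * Strip the mount prefix path from @path, returning the suffix relative
 * to the mounted root, the empty string for the root itself, or an
 * ERR_PTR if @path is shorter than the prefix.
 */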
static const char *path_no_prefix(struct cifs_sb_info *cifs_sb,
				  const char *path)
{
	size_t len = 0;

	if (!*path)
		return path;

	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
	    cifs_sb->prepath) {
		len = strlen(cifs_sb->prepath) + 1;
		if (unlikely(len > strlen(path)))
			return ERR_PTR(-EINVAL);
	}
	return path + len;
}

/*
 * Open and cache a directory handle.
 * If an error occurs then *cfid is not initialized.
 */
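/*
 * A typical caller pattern: on success the caller owns a reference on the
 * returned cfid and must drop it with close_cached_dir() when done:
 *
 *	rc = open_cached_dir(xid, tcon, path, cifs_sb, false, &cfid);
 *	if (!rc) {
 *		... use cfid->fid for operations on the cached handle ...
 *		close_cached_dir(cfid);
 *	}
 */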
int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
		    const char *path,
		    struct cifs_sb_info *cifs_sb,
		    bool lookup_only, struct cached_fid **ret_cfid)
{
	struct cifs_ses *ses;
	struct TCP_Server_Info *server;
	struct cifs_open_parms oparms;
	struct smb2_create_rsp *o_rsp = NULL;
	struct smb2_query_info_rsp *qi_rsp = NULL;
	int resp_buftype[2];
	struct smb_rqst rqst[2];
	struct kvec rsp_iov[2];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qi_iov[1];
	int rc, flags = 0;
	__le16 *utf16_path = NULL;
	u8 oplock = SMB2_OPLOCK_LEVEL_II;
	struct cifs_fid *pfid;
	struct dentry *dentry = NULL;
	struct cached_fid *cfid;
	struct cached_fids *cfids;
	const char *npath;
	int retries = 0, cur_sleep = 1;

	if (tcon == NULL || tcon->cfids == NULL || tcon->nohandlecache ||
	    is_smb1_server(tcon->ses->server) || (dir_cache_timeout == 0))
		return -EOPNOTSUPP;

	ses = tcon->ses;
	cfids = tcon->cfids;

	if (cifs_sb->root == NULL)
		return -ENOENT;

replay_again:
	/* reinitialize for possible replay */
	flags = 0;
	oplock = SMB2_OPLOCK_LEVEL_II;
	server = cifs_pick_channel(ses);

	if (!server->ops->new_lease_key)
		return -EIO;

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	cfid = find_or_create_cached_dir(cfids, path, lookup_only, tcon->max_cached_dirs);
	if (cfid == NULL) {
		kfree(utf16_path);
		return -ENOENT;
	}
	/*
	 * Return the cached fid if it has a lease.  Otherwise, it is either a
	 * new entry or the laundromat worker removed it from @cfids->entries.
	 * The caller will put the last reference in the latter case.
	 */
	spin_lock(&cfids->cfid_list_lock);
	if (cfid->has_lease) {
		spin_unlock(&cfids->cfid_list_lock);
		*ret_cfid = cfid;
		kfree(utf16_path);
		return 0;
	}
	spin_unlock(&cfids->cfid_list_lock);

	/*
	 * Skip any prefix paths in @path as lookup_positive_unlocked() ends up
	 * calling ->lookup() which already adds those through
	 * build_path_from_dentry().  Also, do it earlier as we might reconnect
	 * below when trying to send compounded request and then potentially
	 * having a different prefix path (e.g. after DFS failover).
	 */
	npath = path_no_prefix(cifs_sb, path);
	if (IS_ERR(npath)) {
		rc = PTR_ERR(npath);
		goto out;
	}

	if (!npath[0]) {
		dentry = dget(cifs_sb->root);
	} else {
		dentry = path_to_dentry(cifs_sb, npath);
		if (IS_ERR(dentry)) {
			rc = -ENOENT;
			goto out;
		}
	}
	cfid->dentry = dentry;

	/*
	 * We do not hold the lock during the open because SMB2_open may
	 * need to reconnect.
	 * This is safe because no other thread will be able to get a ref
	 * to the cfid until we have finished opening the file and (possibly)
	 * acquired a lease.
	 */
	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	pfid = &cfid->fid;
	server->ops->new_lease_key(pfid);

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.path = path,
		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE),
		.desired_access = FILE_READ_DATA | FILE_READ_ATTRIBUTES |
				  FILE_READ_EA,
		.disposition = FILE_OPEN,
		.fid = pfid,
		.replay = !!(retries),
	};

	rc = SMB2_open_init(tcon, server,
			    &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto oshr_free;
	smb2_set_next_command(tcon, &rqst[0]);

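	/*
	 * The query below is compounded with the create above: passing
	 * COMPOUND_FID makes the server operate on the handle returned
	 * by the previous command in the chain.
	 */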
	memset(&qi_iov, 0, sizeof(qi_iov));
	rqst[1].rq_iov = qi_iov;
	rqst[1].rq_nvec = 1;

	rc = SMB2_query_info_init(tcon, server,
				  &rqst[1], COMPOUND_FID,
				  COMPOUND_FID, FILE_ALL_INFORMATION,
				  SMB2_O_INFO_FILE, 0,
				  sizeof(struct smb2_file_all_info) +
				  PATH_MAX * 2, 0, NULL);
	if (rc)
		goto oshr_free;

	smb2_set_related(&rqst[1]);

	/*
	 * Set @cfid->has_lease to true before sending out the compounded
	 * request so that its lease reference can be put in
	 * cached_dir_lease_break() due to a potential lease break right after
	 * the request is sent or while @cfid is still being cached.
	 * Concurrent processes won't be able to use it yet due to @cfid->time
	 * being zero.
	 */
	cfid->has_lease = true;

	if (retries) {
		smb2_set_replay(server, &rqst[0]);
		smb2_set_replay(server, &rqst[1]);
	}

	rc = compound_send_recv(xid, ses, server,
				flags, 2, rqst,
				resp_buftype, rsp_iov);
	if (rc) {
		if (rc == -EREMCHG) {
			tcon->need_reconnect = true;
			pr_warn_once("server share %s deleted\n",
				     tcon->tree_name);
		}
		goto oshr_free;
	}
	cfid->tcon = tcon;
	cfid->is_open = true;

	spin_lock(&cfids->cfid_list_lock);

	o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
	oparms.fid->persistent_fid = o_rsp->PersistentFileId;
	oparms.fid->volatile_fid = o_rsp->VolatileFileId;
#ifdef CONFIG_CIFS_DEBUG2
	oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId);
#endif /* CIFS_DEBUG2 */

	if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE) {
		spin_unlock(&cfids->cfid_list_lock);
		rc = -EINVAL;
		goto oshr_free;
	}

	rc = smb2_parse_contexts(server, rsp_iov,
				 &oparms.fid->epoch,
				 oparms.fid->lease_key,
				 &oplock, NULL, NULL);
	if (rc) {
		spin_unlock(&cfids->cfid_list_lock);
		goto oshr_free;
	}

	rc = -EINVAL;
	if (!(oplock & SMB2_LEASE_READ_CACHING_HE)) {
		spin_unlock(&cfids->cfid_list_lock);
		goto oshr_free;
	}
	qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
	if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info)) {
		spin_unlock(&cfids->cfid_list_lock);
		goto oshr_free;
	}
	if (!smb2_validate_and_copy_iov(
				le16_to_cpu(qi_rsp->OutputBufferOffset),
				sizeof(struct smb2_file_all_info),
				&rsp_iov[1], sizeof(struct smb2_file_all_info),
				(char *)&cfid->file_all_info))
		cfid->file_all_info_is_valid = true;

	cfid->time = jiffies;
	spin_unlock(&cfids->cfid_list_lock);
	/* At this point the directory handle is fully cached */
	rc = 0;

oshr_free:
	SMB2_open_free(&rqst[0]);
	SMB2_query_info_free(&rqst[1]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
	if (rc) {
		spin_lock(&cfids->cfid_list_lock);
		if (cfid->on_list) {
			list_del(&cfid->entry);
			cfid->on_list = false;
			cfids->num_entries--;
		}
		if (cfid->has_lease) {
			/*
			 * We are guaranteed to have two references at this
			 * point.  One for the caller and one for a potential
			 * lease.  Release the lease reference so that the
			 * directory will be closed when the caller closes
			 * the cached handle.
			 */
			cfid->has_lease = false;
			spin_unlock(&cfids->cfid_list_lock);
			kref_put(&cfid->refcount, smb2_close_cached_fid);
			goto out;
		}
		spin_unlock(&cfids->cfid_list_lock);
	}
out:
	if (rc) {
		if (cfid->is_open)
			SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
				   cfid->fid.volatile_fid);
		free_cached_dir(cfid);
	} else {
		*ret_cfid = cfid;
		atomic_inc(&tcon->num_remote_opens);
	}
	kfree(utf16_path);

	if (is_replayable_error(rc) &&
	    smb2_should_replay(tcon, &retries, &cur_sleep))
		goto replay_again;

	return rc;
}

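/*
 * Look up a cached directory handle by its dentry rather than by path.
 * Takes a reference on the cfid it returns; never opens a new handle on
 * the wire.
 */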
int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
			      struct dentry *dentry,
			      struct cached_fid **ret_cfid)
{
	struct cached_fid *cfid;
	struct cached_fids *cfids = tcon->cfids;

	if (cfids == NULL)
		return -ENOENT;

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (dentry && cfid->dentry == dentry) {
			cifs_dbg(FYI, "found a cached root file handle by dentry\n");
			kref_get(&cfid->refcount);
			*ret_cfid = cfid;
			spin_unlock(&cfids->cfid_list_lock);
			return 0;
		}
	}
	spin_unlock(&cfids->cfid_list_lock);
	return -ENOENT;
}

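/*
 * kref release callback: unlink the cfid from the list, drop the cached
 * dentry, close the handle on the server if it is still open, and free
 * the cfid.
 */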
static void
smb2_close_cached_fid(struct kref *ref)
{
	struct cached_fid *cfid = container_of(ref, struct cached_fid,
					       refcount);
	int rc;

	spin_lock(&cfid->cfids->cfid_list_lock);
	if (cfid->on_list) {
		list_del(&cfid->entry);
		cfid->on_list = false;
		cfid->cfids->num_entries--;
	}
	spin_unlock(&cfid->cfids->cfid_list_lock);

	dput(cfid->dentry);
	cfid->dentry = NULL;

	if (cfid->is_open) {
		rc = SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
			   cfid->fid.volatile_fid);
		if (rc) /* should we retry on -EBUSY or -EAGAIN? */
			cifs_dbg(VFS, "close cached dir rc %d\n", rc);
	}

	free_cached_dir(cfid);
}

void drop_cached_dir_by_name(const unsigned int xid, struct cifs_tcon *tcon,
			     const char *name, struct cifs_sb_info *cifs_sb)
{
	struct cached_fid *cfid = NULL;
	int rc;

	rc = open_cached_dir(xid, tcon, name, cifs_sb, true, &cfid);
	if (rc) {
		cifs_dbg(FYI, "no cached dir found for rmdir(%s)\n", name);
		return;
	}
	spin_lock(&cfid->cfids->cfid_list_lock);
	if (cfid->has_lease) {
		cfid->has_lease = false;
		kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
	spin_unlock(&cfid->cfids->cfid_list_lock);
	close_cached_dir(cfid);
}

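/*
 * Drop the caller's reference; the directory handle is actually closed
 * once the last reference, including the lease reference, is gone.
 */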
void close_cached_dir(struct cached_fid *cfid)
{
	kref_put(&cfid->refcount, smb2_close_cached_fid);
}

/*
 * Called from cifs_kill_sb when we unmount a share.
 */
void close_all_cached_dirs(struct cifs_sb_info *cifs_sb)
{
	struct rb_root *root = &cifs_sb->tlink_tree;
	struct rb_node *node;
	struct cached_fid *cfid;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cached_fids *cfids;

	for (node = rb_first(root); node; node = rb_next(node)) {
		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
		tcon = tlink_tcon(tlink);
		if (IS_ERR(tcon))
			continue;
		cfids = tcon->cfids;
		if (cfids == NULL)
			continue;
		list_for_each_entry(cfid, &cfids->entries, entry) {
			dput(cfid->dentry);
			cfid->dentry = NULL;
		}
	}
}

/*
 * Invalidate all cached dirs when a TCON has been reset
 * due to a session loss.
 */
void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
{
	struct cached_fids *cfids = tcon->cfids;
	struct cached_fid *cfid, *q;
	LIST_HEAD(entry);

	if (cfids == NULL)
		return;

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
		list_move(&cfid->entry, &entry);
		cfids->num_entries--;
		cfid->is_open = false;
		cfid->on_list = false;
		/* To prevent race with smb2_cached_lease_break() */
		kref_get(&cfid->refcount);
	}
	spin_unlock(&cfids->cfid_list_lock);

	list_for_each_entry_safe(cfid, q, &entry, entry) {
		list_del(&cfid->entry);
		cancel_work_sync(&cfid->lease_break);
		if (cfid->has_lease) {
			/*
			 * The lease was never cancelled from the server,
			 * so we need to drop the reference.
			 */
			spin_lock(&cfids->cfid_list_lock);
			cfid->has_lease = false;
			spin_unlock(&cfids->cfid_list_lock);
			kref_put(&cfid->refcount, smb2_close_cached_fid);
		}
		/* Drop the extra reference opened above */
		kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
}

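/*
 * Worker queued by cached_dir_lease_break(); by the time it runs, the
 * cfid has already been removed from the list, so it only needs to drop
 * the lease reference.
 */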
static void
smb2_cached_lease_break(struct work_struct *work)
{
	struct cached_fid *cfid = container_of(work,
				struct cached_fid, lease_break);

	spin_lock(&cfid->cfids->cfid_list_lock);
	cfid->has_lease = false;
	spin_unlock(&cfid->cfids->cfid_list_lock);
	kref_put(&cfid->refcount, smb2_close_cached_fid);
}

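/*
 * Called from the lease break handling path when the server breaks a
 * directory lease.  Returns true if @lease_key matched a cached handle,
 * in which case the actual teardown is deferred to a workqueue.
 */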
int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16])
{
	struct cached_fids *cfids = tcon->cfids;
	struct cached_fid *cfid;

	if (cfids == NULL)
		return false;

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (cfid->has_lease &&
		    !memcmp(lease_key,
			    cfid->fid.lease_key,
			    SMB2_LEASE_KEY_SIZE)) {
			cfid->time = 0;
			/*
			 * We found the lease; remove it from the list
			 * so that no other threads can access it.
			 */
			list_del(&cfid->entry);
			cfid->on_list = false;
			cfids->num_entries--;

			queue_work(cifsiod_wq,
				   &cfid->lease_break);
			spin_unlock(&cfids->cfid_list_lock);
			return true;
		}
	}
	spin_unlock(&cfids->cfid_list_lock);
	return false;
}

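/*
 * Allocate and initialize a new cfid for @path.  Called with
 * cfids->cfid_list_lock held from find_or_create_cached_dir(), hence
 * the GFP_ATOMIC allocations.
 */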
static struct cached_fid *init_cached_dir(const char *path)
{
	struct cached_fid *cfid;

	cfid = kzalloc(sizeof(*cfid), GFP_ATOMIC);
	if (!cfid)
		return NULL;
	cfid->path = kstrdup(path, GFP_ATOMIC);
	if (!cfid->path) {
		kfree(cfid);
		return NULL;
	}

	INIT_WORK(&cfid->lease_break, smb2_cached_lease_break);
	INIT_LIST_HEAD(&cfid->entry);
	INIT_LIST_HEAD(&cfid->dirents.entries);
	mutex_init(&cfid->dirents.de_mutex);
	spin_lock_init(&cfid->fid_lock);
	kref_init(&cfid->refcount);
	return cfid;
}

static void free_cached_dir(struct cached_fid *cfid)
{
	struct cached_dirent *dirent, *q;

	dput(cfid->dentry);
	cfid->dentry = NULL;

	/*
	 * Delete all cached dirent names
	 */
	list_for_each_entry_safe(dirent, q, &cfid->dirents.entries, entry) {
		list_del(&dirent->entry);
		kfree(dirent->name);
		kfree(dirent);
	}

	kfree(cfid->path);
	cfid->path = NULL;
	kfree(cfid);
}

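/*
 * Delayed worker that expires cached handles whose entries have been
 * fully cached for longer than dir_cache_timeout seconds, then requeues
 * itself for the next scan.
 */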
static void cfids_laundromat_worker(struct work_struct *work)
{
	struct cached_fids *cfids;
	struct cached_fid *cfid, *q;
	LIST_HEAD(entry);

	cfids = container_of(work, struct cached_fids, laundromat_work.work);

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
		if (cfid->time &&
		    time_after(jiffies, cfid->time + HZ * dir_cache_timeout)) {
			cfid->on_list = false;
			list_move(&cfid->entry, &entry);
			cfids->num_entries--;
			/* To prevent race with smb2_cached_lease_break() */
			kref_get(&cfid->refcount);
		}
	}
	spin_unlock(&cfids->cfid_list_lock);

	list_for_each_entry_safe(cfid, q, &entry, entry) {
		list_del(&cfid->entry);
		/*
		 * Cancel and wait for the work to finish in case we are racing
		 * with it.
		 */
		cancel_work_sync(&cfid->lease_break);
		if (cfid->has_lease) {
			/*
			 * Our lease has not yet been cancelled from the server
			 * so we need to drop the reference.
			 */
			spin_lock(&cfids->cfid_list_lock);
			cfid->has_lease = false;
			spin_unlock(&cfids->cfid_list_lock);
			kref_put(&cfid->refcount, smb2_close_cached_fid);
		}
		/* Drop the extra reference opened above */
		kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
	queue_delayed_work(cifsiod_wq, &cfids->laundromat_work,
			   dir_cache_timeout * HZ);
}

struct cached_fids *init_cached_dirs(void)
{
	struct cached_fids *cfids;

	cfids = kzalloc(sizeof(*cfids), GFP_KERNEL);
	if (!cfids)
		return NULL;
	spin_lock_init(&cfids->cfid_list_lock);
	INIT_LIST_HEAD(&cfids->entries);

	INIT_DELAYED_WORK(&cfids->laundromat_work, cfids_laundromat_worker);
	queue_delayed_work(cifsiod_wq, &cfids->laundromat_work,
			   dir_cache_timeout * HZ);

	return cfids;
}

/*
 * Called from tconInfoFree when we are tearing down the tcon.
 * There are no active users or open files/directories at this point.
 */
void free_cached_dirs(struct cached_fids *cfids)
{
	struct cached_fid *cfid, *q;
	LIST_HEAD(entry);

	if (cfids == NULL)
		return;

	cancel_delayed_work_sync(&cfids->laundromat_work);

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
		cfid->on_list = false;
		cfid->is_open = false;
		list_move(&cfid->entry, &entry);
	}
	spin_unlock(&cfids->cfid_list_lock);

	list_for_each_entry_safe(cfid, q, &entry, entry) {
		list_del(&cfid->entry);
		free_cached_dir(cfid);
	}

	kfree(cfids);
}