// SPDX-License-Identifier: GPL-2.0
/*
 *  Functions to handle the cached directory entries
 *
 *  Copyright (c) 2022, Ronnie Sahlberg <lsahlber@redhat.com>
 */

#include <linux/namei.h>
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "cached_dir.h"

static struct cached_fid *init_cached_dir(const char *path);
static void free_cached_dir(struct cached_fid *cfid);
static void smb2_close_cached_fid(struct kref *ref);
static void cfids_laundromat_worker(struct work_struct *work);

struct cached_dir_dentry {
	struct list_head entry;
	struct dentry *dentry;
};

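/*
 * Find a cached directory handle for @path, taking an extra reference to
 * it, or create and list a new one if @lookup_only is false and the cache
 * is not yet full.  Returns NULL if no usable handle exists and none can
 * be created.
 *
 * Must be called with @cfids->cfid_list_lock held.
 */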
static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
						    const char *path,
						    bool lookup_only,
						    __u32 max_cached_dirs)
{
	struct cached_fid *cfid;

	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (!strcmp(cfid->path, path)) {
			/*
			 * If it doesn't have a lease it is either not yet
			 * fully cached or it may be in the process of
			 * being deleted due to a lease break.
			 */
			if (!cfid->time || !cfid->has_lease) {
				return NULL;
			}
			kref_get(&cfid->refcount);
			return cfid;
		}
	}
	if (lookup_only) {
		return NULL;
	}
	if (cfids->num_entries >= max_cached_dirs) {
		return NULL;
	}
	cfid = init_cached_dir(path);
	if (cfid == NULL) {
		return NULL;
	}
	cfid->cfids = cfids;
	cfids->num_entries++;
	list_add(&cfid->entry, &cfids->entries);
	cfid->on_list = true;
	kref_get(&cfid->refcount);
	/*
	 * Set @cfid->has_lease to true during construction so that the lease
	 * reference can be put in cached_dir_lease_break() due to a potential
	 * lease break right after the request is sent or while @cfid is still
	 * being cached, or if a reconnection is triggered during construction.
	 * Concurrent processes won't be able to use it yet due to @cfid->time
	 * being zero.
	 */
	cfid->has_lease = true;

	return cfid;
}

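/*
 * Walk @path component by component, starting from the superblock root,
 * and return the matching dentry with a reference held, or an ERR_PTR on
 * failure.
 */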
static struct dentry *
path_to_dentry(struct cifs_sb_info *cifs_sb, const char *path)
{
	struct dentry *dentry;
	const char *s, *p;
	char sep;

	sep = CIFS_DIR_SEP(cifs_sb);
	dentry = dget(cifs_sb->root);
	s = path;

	do {
		struct inode *dir = d_inode(dentry);
		struct dentry *child;

		if (!S_ISDIR(dir->i_mode)) {
			dput(dentry);
			dentry = ERR_PTR(-ENOTDIR);
			break;
		}

		/* skip separators */
		while (*s == sep)
			s++;
		if (!*s)
			break;
		p = s++;
		/* next separator */
		while (*s && *s != sep)
			s++;

		child = lookup_positive_unlocked(p, dentry, s - p);
		dput(dentry);
		dentry = child;
	} while (!IS_ERR(dentry));
	return dentry;
}

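/*
 * Return @path with the mount's prefix path skipped when the mount used
 * one, or an ERR_PTR if @path is shorter than that prefix.
 */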
static const char *path_no_prefix(struct cifs_sb_info *cifs_sb,
				  const char *path)
{
	size_t len = 0;

	if (!*path)
		return path;

	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
	    cifs_sb->prepath) {
		len = strlen(cifs_sb->prepath) + 1;
		if (unlikely(len > strlen(path)))
			return ERR_PTR(-EINVAL);
	}
	return path + len;
}

/*
 * Open and cache a directory handle.
 * If error then *cfid is not initialized.
 */
int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
		    const char *path,
		    struct cifs_sb_info *cifs_sb,
		    bool lookup_only, struct cached_fid **ret_cfid)
{
	struct cifs_ses *ses;
	struct TCP_Server_Info *server;
	struct cifs_open_parms oparms;
	struct smb2_create_rsp *o_rsp = NULL;
	struct smb2_query_info_rsp *qi_rsp = NULL;
	int resp_buftype[2];
	struct smb_rqst rqst[2];
	struct kvec rsp_iov[2];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qi_iov[1];
	int rc, flags = 0;
	__le16 *utf16_path = NULL;
	u8 oplock = SMB2_OPLOCK_LEVEL_II;
	struct cifs_fid *pfid;
	struct dentry *dentry = NULL;
	struct cached_fid *cfid;
	struct cached_fids *cfids;
	const char *npath;
	int retries = 0, cur_sleep = 1;

	if (cifs_sb->root == NULL)
		return -ENOENT;

	if (tcon == NULL)
		return -EOPNOTSUPP;

	ses = tcon->ses;
	cfids = tcon->cfids;

	if (cfids == NULL)
		return -EOPNOTSUPP;

replay_again:
	/* reinitialize for possible replay */
	flags = 0;
	oplock = SMB2_OPLOCK_LEVEL_II;
	server = cifs_pick_channel(ses);

	if (!server->ops->new_lease_key)
		return -EIO;

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	spin_lock(&cfids->cfid_list_lock);
	cfid = find_or_create_cached_dir(cfids, path, lookup_only, tcon->max_cached_dirs);
	if (cfid == NULL) {
		spin_unlock(&cfids->cfid_list_lock);
		kfree(utf16_path);
		return -ENOENT;
	}
	/*
	 * Return cached fid if it is valid (has a lease and has a time).
	 * Otherwise, it is either a new entry or the laundromat worker removed
	 * it from @cfids->entries.  Caller will put last reference if the
	 * latter.
	 */
	if (cfid->has_lease && cfid->time) {
		spin_unlock(&cfids->cfid_list_lock);
		*ret_cfid = cfid;
		kfree(utf16_path);
		return 0;
	}
	spin_unlock(&cfids->cfid_list_lock);


	/*
	 * Skip any prefix paths in @path as lookup_positive_unlocked() ends up
	 * calling ->lookup() which already adds those through
	 * build_path_from_dentry().  Also, do it earlier as we might reconnect
	 * below when trying to send the compounded request and then
	 * potentially have a different prefix path (e.g. after DFS failover).
	 */
	npath = path_no_prefix(cifs_sb, path);
	if (IS_ERR(npath)) {
		rc = PTR_ERR(npath);
		goto out;
	}

	if (!npath[0]) {
		dentry = dget(cifs_sb->root);
	} else {
		dentry = path_to_dentry(cifs_sb, npath);
		if (IS_ERR(dentry)) {
			rc = -ENOENT;
			goto out;
		}
	}
	cfid->dentry = dentry;
	cfid->tcon = tcon;

	/*
	 * We do not hold the lock for the open because SMB2_open may need
	 * to reconnect.
	 * This is safe because no other thread will be able to get a ref
	 * to the cfid until we have finished opening the file and (possibly)
	 * acquired a lease.
	 */
	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	pfid = &cfid->fid;
	server->ops->new_lease_key(pfid);

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

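	/*
	 * Compound the create of the directory with a query for its
	 * attributes so that both complete in a single round trip.
	 */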
	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.path = path,
		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE),
		.desired_access = FILE_READ_DATA | FILE_READ_ATTRIBUTES |
				  FILE_READ_EA,
		.disposition = FILE_OPEN,
		.fid = pfid,
		.replay = !!(retries),
	};

	rc = SMB2_open_init(tcon, server,
			    &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto oshr_free;
	smb2_set_next_command(tcon, &rqst[0]);

	memset(&qi_iov, 0, sizeof(qi_iov));
	rqst[1].rq_iov = qi_iov;
	rqst[1].rq_nvec = 1;

	rc = SMB2_query_info_init(tcon, server,
				  &rqst[1], COMPOUND_FID,
				  COMPOUND_FID, FILE_ALL_INFORMATION,
				  SMB2_O_INFO_FILE, 0,
				  sizeof(struct smb2_file_all_info) +
				  PATH_MAX * 2, 0, NULL);
	if (rc)
		goto oshr_free;

	smb2_set_related(&rqst[1]);

	if (retries) {
		smb2_set_replay(server, &rqst[0]);
		smb2_set_replay(server, &rqst[1]);
	}

	rc = compound_send_recv(xid, ses, server,
				flags, 2, rqst,
				resp_buftype, rsp_iov);
	if (rc) {
		if (rc == -EREMCHG) {
			tcon->need_reconnect = true;
			pr_warn_once("server share %s deleted\n",
				     tcon->tree_name);
		}
		goto oshr_free;
	}
	cfid->is_open = true;

	spin_lock(&cfids->cfid_list_lock);

	o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
	oparms.fid->persistent_fid = o_rsp->PersistentFileId;
	oparms.fid->volatile_fid = o_rsp->VolatileFileId;
#ifdef CONFIG_CIFS_DEBUG2
	oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId);
#endif /* CONFIG_CIFS_DEBUG2 */

	if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE) {
		spin_unlock(&cfids->cfid_list_lock);
		rc = -EINVAL;
		goto oshr_free;
	}

	rc = smb2_parse_contexts(server, rsp_iov,
				 &oparms.fid->epoch,
				 oparms.fid->lease_key,
				 &oplock, NULL, NULL);
	if (rc) {
		spin_unlock(&cfids->cfid_list_lock);
		goto oshr_free;
	}

	rc = -EINVAL;
	if (!(oplock & SMB2_LEASE_READ_CACHING_HE)) {
		spin_unlock(&cfids->cfid_list_lock);
		goto oshr_free;
	}
	qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
	if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info)) {
		spin_unlock(&cfids->cfid_list_lock);
		goto oshr_free;
	}
	if (!smb2_validate_and_copy_iov(
				le16_to_cpu(qi_rsp->OutputBufferOffset),
				sizeof(struct smb2_file_all_info),
				&rsp_iov[1], sizeof(struct smb2_file_all_info),
				(char *)&cfid->file_all_info))
		cfid->file_all_info_is_valid = true;

	cfid->time = jiffies;
	spin_unlock(&cfids->cfid_list_lock);
	/* At this point the directory handle is fully cached */
	rc = 0;

oshr_free:
	SMB2_open_free(&rqst[0]);
	SMB2_query_info_free(&rqst[1]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
out:
	if (rc) {
		spin_lock(&cfids->cfid_list_lock);
		if (cfid->on_list) {
			list_del(&cfid->entry);
			cfid->on_list = false;
			cfids->num_entries--;
		}
		if (cfid->has_lease) {
			/*
			 * We are guaranteed to have two references at this
			 * point. One for the caller and one for a potential
			 * lease. Release one here, and the second below.
			 */
			cfid->has_lease = false;
			kref_put(&cfid->refcount, smb2_close_cached_fid);
		}
		spin_unlock(&cfids->cfid_list_lock);

		kref_put(&cfid->refcount, smb2_close_cached_fid);
	} else {
		*ret_cfid = cfid;
		atomic_inc(&tcon->num_remote_opens);
	}
	kfree(utf16_path);

	if (is_replayable_error(rc) &&
	    smb2_should_replay(tcon, &retries, &cur_sleep))
		goto replay_again;

	return rc;
}

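/*
 * Find an already-cached directory handle whose dentry matches @dentry and
 * take a reference to it.  Unlike open_cached_dir(), this never creates a
 * new entry or sends requests to the server.
 */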
int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
			      struct dentry *dentry,
			      struct cached_fid **ret_cfid)
{
	struct cached_fid *cfid;
	struct cached_fids *cfids = tcon->cfids;

	if (cfids == NULL)
		return -EOPNOTSUPP;

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (dentry && cfid->dentry == dentry) {
			cifs_dbg(FYI, "found a cached file handle by dentry\n");
			kref_get(&cfid->refcount);
			*ret_cfid = cfid;
			spin_unlock(&cfids->cfid_list_lock);
			return 0;
		}
	}
	spin_unlock(&cfids->cfid_list_lock);
	return -ENOENT;
}

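/*
 * kref release function: unlink the entry from the list, drop its dentry,
 * close the handle on the server if it is still open, then free it.
 */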
static void
smb2_close_cached_fid(struct kref *ref)
{
	struct cached_fid *cfid = container_of(ref, struct cached_fid,
					       refcount);
	int rc;

	spin_lock(&cfid->cfids->cfid_list_lock);
	if (cfid->on_list) {
		list_del(&cfid->entry);
		cfid->on_list = false;
		cfid->cfids->num_entries--;
	}
	spin_unlock(&cfid->cfids->cfid_list_lock);

	dput(cfid->dentry);
	cfid->dentry = NULL;

	if (cfid->is_open) {
		rc = SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
				cfid->fid.volatile_fid);
		if (rc) /* should we retry on -EBUSY or -EAGAIN? */
			cifs_dbg(VFS, "close cached dir rc %d\n", rc);
	}

	free_cached_dir(cfid);
}

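/*
 * Drop the cached lease on @name, if one is held, so that operations such
 * as rmdir can proceed on the directory.
 */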
void drop_cached_dir_by_name(const unsigned int xid, struct cifs_tcon *tcon,
			     const char *name, struct cifs_sb_info *cifs_sb)
{
	struct cached_fid *cfid = NULL;
	int rc;

	rc = open_cached_dir(xid, tcon, name, cifs_sb, true, &cfid);
	if (rc) {
		cifs_dbg(FYI, "no cached dir found for rmdir(%s)\n", name);
		return;
	}
	spin_lock(&cfid->cfids->cfid_list_lock);
	if (cfid->has_lease) {
		cfid->has_lease = false;
		kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
	spin_unlock(&cfid->cfids->cfid_list_lock);
	close_cached_dir(cfid);
}

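/*
 * Drop a reference obtained from open_cached_dir() or
 * open_cached_dir_by_dentry().
 */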
void close_cached_dir(struct cached_fid *cfid)
{
	kref_put(&cfid->refcount, smb2_close_cached_fid);
}

/*
 * Called from cifs_kill_sb when we unmount a share
 */
void close_all_cached_dirs(struct cifs_sb_info *cifs_sb)
{
	struct rb_root *root = &cifs_sb->tlink_tree;
	struct rb_node *node;
	struct cached_fid *cfid;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cached_fids *cfids;
	struct cached_dir_dentry *tmp_list, *q;
	LIST_HEAD(entry);

	spin_lock(&cifs_sb->tlink_tree_lock);
	for (node = rb_first(root); node; node = rb_next(node)) {
		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
		tcon = tlink_tcon(tlink);
		if (IS_ERR(tcon))
			continue;
		cfids = tcon->cfids;
		if (cfids == NULL)
			continue;
		spin_lock(&cfids->cfid_list_lock);
		list_for_each_entry(cfid, &cfids->entries, entry) {
			tmp_list = kmalloc(sizeof(*tmp_list), GFP_ATOMIC);
			if (tmp_list == NULL)
				break;
			spin_lock(&cfid->fid_lock);
			tmp_list->dentry = cfid->dentry;
			cfid->dentry = NULL;
			spin_unlock(&cfid->fid_lock);

			list_add_tail(&tmp_list->entry, &entry);
		}
		spin_unlock(&cfids->cfid_list_lock);
	}
	spin_unlock(&cifs_sb->tlink_tree_lock);

	list_for_each_entry_safe(tmp_list, q, &entry, entry) {
		list_del(&tmp_list->entry);
		dput(tmp_list->dentry);
		kfree(tmp_list);
	}

	/* Flush any pending work that will drop dentries */
	flush_workqueue(cfid_put_wq);
}

/*
 * Invalidate all cached dirs when a TCON has been reset
 * due to a session loss.
 */
void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
{
	struct cached_fids *cfids = tcon->cfids;
	struct cached_fid *cfid, *q;

	if (cfids == NULL)
		return;

	/*
	 * Mark all the cfids as closed, and move them to the cfids->dying list.
	 * They'll be cleaned up later by cfids_invalidation_worker. Take
	 * a reference to each cfid during this process.
	 */
	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
		list_move(&cfid->entry, &cfids->dying);
		cfids->num_entries--;
		cfid->is_open = false;
		cfid->on_list = false;
		if (cfid->has_lease) {
			/*
			 * The lease was never cancelled from the server,
			 * so steal that reference.
			 */
			cfid->has_lease = false;
		} else
			kref_get(&cfid->refcount);
	}
	/*
	 * Queue dropping of the dentries once locks have been dropped
	 */
	if (!list_empty(&cfids->dying))
		queue_work(cfid_put_wq, &cfids->invalidation_work);
	spin_unlock(&cfids->cfid_list_lock);
}

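/*
 * Work function that drops a reference to the cached directory, closing
 * the handle on the server if it was the last one, and then puts the tcon
 * reference taken when the work was queued.
 */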
static void
cached_dir_offload_close(struct work_struct *work)
{
	struct cached_fid *cfid = container_of(work,
				struct cached_fid, close_work);
	struct cifs_tcon *tcon = cfid->tcon;

	WARN_ON(cfid->on_list);

	kref_put(&cfid->refcount, smb2_close_cached_fid);
	cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_cached_close);
}

/*
 * Release the cached directory's dentry, and then queue work to drop the
 * cached directory itself (closing on server if needed).
 *
 * Must be called with a reference to the cached_fid and a reference to the
 * tcon.
 */
static void cached_dir_put_work(struct work_struct *work)
{
	struct cached_fid *cfid = container_of(work, struct cached_fid,
					       put_work);
	struct dentry *dentry;

	spin_lock(&cfid->fid_lock);
	dentry = cfid->dentry;
	cfid->dentry = NULL;
	spin_unlock(&cfid->fid_lock);

	dput(dentry);
	queue_work(serverclose_wq, &cfid->close_work);
}

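/*
 * Called when a lease break arrives for @lease_key.  Returns true if it
 * matched a cached directory, in which case the entry is taken off the
 * list and work is queued to release the handle.
 */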
int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16])
{
	struct cached_fids *cfids = tcon->cfids;
	struct cached_fid *cfid;

	if (cfids == NULL)
		return false;

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (cfid->has_lease &&
		    !memcmp(lease_key,
			    cfid->fid.lease_key,
			    SMB2_LEASE_KEY_SIZE)) {
			cfid->has_lease = false;
			cfid->time = 0;
			/*
			 * We found a lease; remove it from the list
			 * so no threads can access it.
			 */
			list_del(&cfid->entry);
			cfid->on_list = false;
			cfids->num_entries--;

			++tcon->tc_count;
			trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
					    netfs_trace_tcon_ref_get_cached_lease_break);
			queue_work(cfid_put_wq, &cfid->put_work);
			spin_unlock(&cfids->cfid_list_lock);
			return true;
		}
	}
	spin_unlock(&cfids->cfid_list_lock);
	return false;
}

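/*
 * Allocate and initialize a cached_fid for @path.  GFP_ATOMIC is used
 * since callers hold @cfids->cfid_list_lock.
 */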
static struct cached_fid *init_cached_dir(const char *path)
{
	struct cached_fid *cfid;

	cfid = kzalloc(sizeof(*cfid), GFP_ATOMIC);
	if (!cfid)
		return NULL;
	cfid->path = kstrdup(path, GFP_ATOMIC);
	if (!cfid->path) {
		kfree(cfid);
		return NULL;
	}

	INIT_WORK(&cfid->close_work, cached_dir_offload_close);
	INIT_WORK(&cfid->put_work, cached_dir_put_work);
	INIT_LIST_HEAD(&cfid->entry);
	INIT_LIST_HEAD(&cfid->dirents.entries);
	mutex_init(&cfid->dirents.de_mutex);
	spin_lock_init(&cfid->fid_lock);
	kref_init(&cfid->refcount);
	return cfid;
}

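/*
 * Free a cached_fid and any directory entries cached under it.  No work
 * may still be pending on the entry at this point.
 */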
static void free_cached_dir(struct cached_fid *cfid)
{
	struct cached_dirent *dirent, *q;

	WARN_ON(work_pending(&cfid->close_work));
	WARN_ON(work_pending(&cfid->put_work));

	dput(cfid->dentry);
	cfid->dentry = NULL;

	/*
	 * Delete all cached dirent names
	 */
	list_for_each_entry_safe(dirent, q, &cfid->dirents.entries, entry) {
		list_del(&dirent->entry);
		kfree(dirent->name);
		kfree(dirent);
	}

	kfree(cfid->path);
	cfid->path = NULL;
	kfree(cfid);
}

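/*
 * Work function that frees the entries moved to @cfids->dying by
 * invalidate_all_cached_dirs(), dropping the references taken there.
 */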
static void cfids_invalidation_worker(struct work_struct *work)
{
	struct cached_fids *cfids = container_of(work, struct cached_fids,
						 invalidation_work);
	struct cached_fid *cfid, *q;
	LIST_HEAD(entry);

	spin_lock(&cfids->cfid_list_lock);
	/* move cfids->dying to the local list */
	list_cut_before(&entry, &cfids->dying, &cfids->dying);
	spin_unlock(&cfids->cfid_list_lock);

	list_for_each_entry_safe(cfid, q, &entry, entry) {
		list_del(&cfid->entry);
		/* Drop the ref-count acquired in invalidate_all_cached_dirs */
		kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
}

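/*
 * Periodic work that expires cached directories which were cached more
 * than dir_cache_timeout seconds ago, then requeues itself.
 */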
static void cfids_laundromat_worker(struct work_struct *work)
{
	struct cached_fids *cfids;
	struct cached_fid *cfid, *q;
	struct dentry *dentry;
	LIST_HEAD(entry);

	cfids = container_of(work, struct cached_fids, laundromat_work.work);

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
		if (cfid->time &&
		    time_after(jiffies, cfid->time + HZ * dir_cache_timeout)) {
			cfid->on_list = false;
			list_move(&cfid->entry, &entry);
			cfids->num_entries--;
			if (cfid->has_lease) {
				/*
				 * Our lease has not yet been cancelled from the
				 * server. Steal that reference.
				 */
				cfid->has_lease = false;
			} else
				kref_get(&cfid->refcount);
		}
	}
	spin_unlock(&cfids->cfid_list_lock);

	list_for_each_entry_safe(cfid, q, &entry, entry) {
		list_del(&cfid->entry);

		spin_lock(&cfid->fid_lock);
		dentry = cfid->dentry;
		cfid->dentry = NULL;
		spin_unlock(&cfid->fid_lock);

		dput(dentry);
		if (cfid->is_open) {
			spin_lock(&cifs_tcp_ses_lock);
			++cfid->tcon->tc_count;
			trace_smb3_tcon_ref(cfid->tcon->debug_id, cfid->tcon->tc_count,
					    netfs_trace_tcon_ref_get_cached_laundromat);
			spin_unlock(&cifs_tcp_ses_lock);
			queue_work(serverclose_wq, &cfid->close_work);
		} else
			/*
			 * Drop the ref-count from above, either the lease-ref (if there
			 * was one) or the extra one acquired.
			 */
			kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
	queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
			   dir_cache_timeout * HZ);
}

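/*
 * Allocate the per-tcon cache of directory handles and start the
 * laundromat that expires stale entries.
 */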
struct cached_fids *init_cached_dirs(void)
{
	struct cached_fids *cfids;

	cfids = kzalloc(sizeof(*cfids), GFP_KERNEL);
	if (!cfids)
		return NULL;
	spin_lock_init(&cfids->cfid_list_lock);
	INIT_LIST_HEAD(&cfids->entries);
	INIT_LIST_HEAD(&cfids->dying);

	INIT_WORK(&cfids->invalidation_work, cfids_invalidation_worker);
	INIT_DELAYED_WORK(&cfids->laundromat_work, cfids_laundromat_worker);
	queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
			   dir_cache_timeout * HZ);

	return cfids;
}

/*
 * Called from tconInfoFree when we are tearing down the tcon.
 * There are no active users or open files/directories at this point.
 */
void free_cached_dirs(struct cached_fids *cfids)
{
	struct cached_fid *cfid, *q;
	LIST_HEAD(entry);

	if (cfids == NULL)
		return;

	cancel_delayed_work_sync(&cfids->laundromat_work);
	cancel_work_sync(&cfids->invalidation_work);

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
		cfid->on_list = false;
		cfid->is_open = false;
		list_move(&cfid->entry, &entry);
	}
	list_for_each_entry_safe(cfid, q, &cfids->dying, entry) {
		cfid->on_list = false;
		cfid->is_open = false;
		list_move(&cfid->entry, &entry);
	}
	spin_unlock(&cfids->cfid_list_lock);

	list_for_each_entry_safe(cfid, q, &entry, entry) {
		list_del(&cfid->entry);
		free_cached_dir(cfid);
	}

	kfree(cfids);
}