// SPDX-License-Identifier: GPL-2.0
/*
 * Functions to handle the cached directory entries
 *
 * Copyright (c) 2022, Ronnie Sahlberg <lsahlber@redhat.com>
 */

#include <linux/namei.h>
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "cached_dir.h"

static struct cached_fid *init_cached_dir(const char *path);
static void free_cached_dir(struct cached_fid *cfid);
static void smb2_close_cached_fid(struct kref *ref);
static void cfids_laundromat_worker(struct work_struct *work);

struct cached_dir_dentry {
	struct list_head entry;
	struct dentry *dentry;
};

static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
						    const char *path,
						    bool lookup_only,
						    __u32 max_cached_dirs)
{
	struct cached_fid *cfid;

	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (!strcmp(cfid->path, path)) {
			/*
			 * If it doesn't have a lease it is either not yet
			 * fully cached or it may be in the process of
			 * being deleted due to a lease break.
			 */
			if (!cfid->time || !cfid->has_lease) {
				return NULL;
			}
			kref_get(&cfid->refcount);
			return cfid;
		}
	}
	if (lookup_only) {
		return NULL;
	}
	if (cfids->num_entries >= max_cached_dirs) {
		return NULL;
	}
	cfid = init_cached_dir(path);
	if (cfid == NULL) {
		return NULL;
	}
	cfid->cfids = cfids;
	cfids->num_entries++;
	list_add(&cfid->entry, &cfids->entries);
	cfid->on_list = true;
	kref_get(&cfid->refcount);
	/*
	 * Set @cfid->has_lease to true during construction so that the lease
	 * reference can be put in cached_dir_lease_break() due to a potential
	 * lease break right after the request is sent or while @cfid is still
	 * being cached, or if a reconnection is triggered during construction.
	 * Concurrent processes won't be able to use it yet due to @cfid->time
	 * being zero.
	 */
	cfid->has_lease = true;

	return cfid;
}

static struct dentry *
path_to_dentry(struct cifs_sb_info *cifs_sb, const char *path)
{
	struct dentry *dentry;
	const char *s, *p;
	char sep;

	sep = CIFS_DIR_SEP(cifs_sb);
	dentry = dget(cifs_sb->root);
	s = path;

	do {
		struct inode *dir = d_inode(dentry);
		struct dentry *child;

		if (!S_ISDIR(dir->i_mode)) {
			dput(dentry);
			dentry = ERR_PTR(-ENOTDIR);
			break;
		}

		/* skip separators */
		while (*s == sep)
			s++;
		if (!*s)
			break;
		p = s++;
		/* next separator */
		while (*s && *s != sep)
			s++;

		child = lookup_noperm_positive_unlocked(&QSTR_LEN(p, s - p),
							dentry);
		dput(dentry);
		dentry = child;
	} while (!IS_ERR(dentry));
	return dentry;
}

static const char *path_no_prefix(struct cifs_sb_info *cifs_sb,
				  const char *path)
{
	size_t len = 0;

	if (!*path)
		return path;

	if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_USE_PREFIX_PATH) &&
	    cifs_sb->prepath) {
		len = strlen(cifs_sb->prepath) + 1;
		if (unlikely(len > strlen(path)))
			return ERR_PTR(-EINVAL);
	}
	return path + len;
}
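/*
 * Illustrative example for path_no_prefix() above: with
 * CIFS_MOUNT_USE_PREFIX_PATH set, cifs_sb->prepath == "dir1" and
 * path == "dir1\dir2", the prefix plus one separator are skipped and
 * "dir2" is returned; an empty @path is returned unchanged.
 */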
/*
 * Open and cache a directory handle.
 * If error then *cfid is not initialized.
 */
int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
		    const char *path,
		    struct cifs_sb_info *cifs_sb,
		    bool lookup_only, struct cached_fid **ret_cfid)
{
	struct cifs_ses *ses;
	struct TCP_Server_Info *server;
	struct cifs_open_parms oparms;
	struct smb2_create_rsp *o_rsp = NULL;
	struct smb2_query_info_rsp *qi_rsp = NULL;
	int resp_buftype[2];
	struct smb_rqst rqst[2];
	struct kvec rsp_iov[2];
	struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
	struct kvec qi_iov[1];
	int rc, flags = 0;
	__le16 *utf16_path = NULL;
	u8 oplock = SMB2_OPLOCK_LEVEL_II;
	struct cifs_fid *pfid;
	struct dentry *dentry = NULL;
	struct cached_fid *cfid;
	struct cached_fids *cfids;
	const char *npath;
	int retries = 0, cur_sleep = 1;
	__le32 lease_flags = 0;

	if (cifs_sb->root == NULL)
		return -ENOENT;

	if (tcon == NULL)
		return -EOPNOTSUPP;

	ses = tcon->ses;
	cfids = tcon->cfids;

	if (cfids == NULL)
		return -EOPNOTSUPP;

replay_again:
	/* reinitialize for possible replay */
	flags = 0;
	oplock = SMB2_OPLOCK_LEVEL_II;
	server = cifs_pick_channel(ses);

	if (!server->ops->new_lease_key)
		return -EIO;

	utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
	if (!utf16_path)
		return -ENOMEM;

	spin_lock(&cfids->cfid_list_lock);
	cfid = find_or_create_cached_dir(cfids, path, lookup_only, tcon->max_cached_dirs);
	if (cfid == NULL) {
		spin_unlock(&cfids->cfid_list_lock);
		kfree(utf16_path);
		return -ENOENT;
	}
	/*
	 * Return cached fid if it is valid (has a lease and has a time).
	 * Otherwise, it is either a new entry or laundromat worker removed it
	 * from @cfids->entries. Caller will put last reference if the latter.
	 */
	if (cfid->has_lease && cfid->time) {
		spin_unlock(&cfids->cfid_list_lock);
		*ret_cfid = cfid;
		kfree(utf16_path);
		return 0;
	}
	spin_unlock(&cfids->cfid_list_lock);

	pfid = &cfid->fid;

	/*
	 * Skip any prefix paths in @path as lookup_noperm_positive_unlocked()
	 * ends up calling ->lookup() which already adds those through
	 * build_path_from_dentry(). Also, do it earlier as we might reconnect
	 * below when trying to send the compounded request and then
	 * potentially end up with a different prefix path (e.g. after DFS
	 * failover).
	 */
	npath = path_no_prefix(cifs_sb, path);
	if (IS_ERR(npath)) {
		rc = PTR_ERR(npath);
		goto out;
	}

	if (!npath[0]) {
		dentry = dget(cifs_sb->root);
	} else {
		dentry = path_to_dentry(cifs_sb, npath);
		if (IS_ERR(dentry)) {
			rc = -ENOENT;
			goto out;
		}
		if (dentry->d_parent && server->dialect >= SMB30_PROT_ID) {
			struct cached_fid *parent_cfid;

			spin_lock(&cfids->cfid_list_lock);
			list_for_each_entry(parent_cfid, &cfids->entries, entry) {
				if (parent_cfid->dentry == dentry->d_parent) {
					cifs_dbg(FYI, "found a parent cached file handle\n");
					if (parent_cfid->has_lease && parent_cfid->time) {
						lease_flags
							|= SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET_LE;
						memcpy(pfid->parent_lease_key,
						       parent_cfid->fid.lease_key,
						       SMB2_LEASE_KEY_SIZE);
					}
					break;
				}
			}
			spin_unlock(&cfids->cfid_list_lock);
		}
	}
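	/*
	 * The parent lease key lookup above is gated on SMB 3.0+ because
	 * directory leases (and the parent lease key carried in the lease v2
	 * create context) are SMB3 features: if the parent directory is
	 * already cached with a valid lease, its lease key is passed so the
	 * server can relate the new lease to the parent's.
	 */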
	cfid->dentry = dentry;
	cfid->tcon = tcon;

	/*
	 * We do not hold the lock for the open because SMB2_open may need
	 * to reconnect.
	 * This is safe because no other thread will be able to get a ref
	 * to the cfid until we have finished opening the file and (possibly)
	 * acquired a lease.
	 */
	if (smb3_encryption_required(tcon))
		flags |= CIFS_TRANSFORM_REQ;

	server->ops->new_lease_key(pfid);

	memset(rqst, 0, sizeof(rqst));
	resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
	memset(rsp_iov, 0, sizeof(rsp_iov));

	/* Open */
	memset(&open_iov, 0, sizeof(open_iov));
	rqst[0].rq_iov = open_iov;
	rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;

	oparms = (struct cifs_open_parms) {
		.tcon = tcon,
		.path = path,
		.create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE),
		.desired_access = FILE_READ_DATA | FILE_READ_ATTRIBUTES |
				  FILE_READ_EA,
		.disposition = FILE_OPEN,
		.fid = pfid,
		.lease_flags = lease_flags,
		.replay = !!(retries),
	};

	rc = SMB2_open_init(tcon, server,
			    &rqst[0], &oplock, &oparms, utf16_path);
	if (rc)
		goto oshr_free;
	smb2_set_next_command(tcon, &rqst[0]);

	memset(&qi_iov, 0, sizeof(qi_iov));
	rqst[1].rq_iov = qi_iov;
	rqst[1].rq_nvec = 1;

	rc = SMB2_query_info_init(tcon, server,
				  &rqst[1], COMPOUND_FID,
				  COMPOUND_FID, FILE_ALL_INFORMATION,
				  SMB2_O_INFO_FILE, 0,
				  sizeof(struct smb2_file_all_info) +
				  PATH_MAX * 2, 0, NULL);
	if (rc)
		goto oshr_free;

	smb2_set_related(&rqst[1]);

	if (retries) {
		smb2_set_replay(server, &rqst[0]);
		smb2_set_replay(server, &rqst[1]);
	}

	rc = compound_send_recv(xid, ses, server,
				flags, 2, rqst,
				resp_buftype, rsp_iov);
	if (rc) {
		if (rc == -EREMCHG) {
			tcon->need_reconnect = true;
			pr_warn_once("server share %s deleted\n",
				     tcon->tree_name);
		}
		goto oshr_free;
	}
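	/*
	 * Reaching here means the compound request (a create followed by a
	 * related query_info that reuses the create's handle via
	 * COMPOUND_FID) succeeded; the two responses are parsed below.
	 */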
	cfid->is_open = true;

	spin_lock(&cfids->cfid_list_lock);

	o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
	oparms.fid->persistent_fid = o_rsp->PersistentFileId;
	oparms.fid->volatile_fid = o_rsp->VolatileFileId;
#ifdef CONFIG_CIFS_DEBUG2
	oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId);
#endif /* CIFS_DEBUG2 */

	if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE) {
		spin_unlock(&cfids->cfid_list_lock);
		rc = -EINVAL;
		goto oshr_free;
	}

	rc = smb2_parse_contexts(server, rsp_iov,
				 &oparms.fid->epoch,
				 oparms.fid->lease_key,
				 &oplock, NULL, NULL);
	if (rc) {
		spin_unlock(&cfids->cfid_list_lock);
		goto oshr_free;
	}

	rc = -EINVAL;
	if (!(oplock & SMB2_LEASE_READ_CACHING_HE)) {
		spin_unlock(&cfids->cfid_list_lock);
		goto oshr_free;
	}
	qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
	if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info)) {
		spin_unlock(&cfids->cfid_list_lock);
		goto oshr_free;
	}
	if (!smb2_validate_and_copy_iov(
				le16_to_cpu(qi_rsp->OutputBufferOffset),
				sizeof(struct smb2_file_all_info),
				&rsp_iov[1], sizeof(struct smb2_file_all_info),
				(char *)&cfid->file_all_info))
		cfid->file_all_info_is_valid = true;

	cfid->time = jiffies;
	spin_unlock(&cfids->cfid_list_lock);
	/* At this point the directory handle is fully cached */
	rc = 0;

oshr_free:
	SMB2_open_free(&rqst[0]);
	SMB2_query_info_free(&rqst[1]);
	free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
	free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
out:
	if (rc) {
		spin_lock(&cfids->cfid_list_lock);
		if (cfid->on_list) {
			list_del(&cfid->entry);
			cfid->on_list = false;
			cfids->num_entries--;
		}
		if (cfid->has_lease) {
			/*
			 * We are guaranteed to have two references at this
			 * point. One for the caller and one for a potential
			 * lease. Release one here, and the second below.
			 */
			cfid->has_lease = false;
			kref_put(&cfid->refcount, smb2_close_cached_fid);
		}
		spin_unlock(&cfids->cfid_list_lock);

		kref_put(&cfid->refcount, smb2_close_cached_fid);
	} else {
		*ret_cfid = cfid;
		atomic_inc(&tcon->num_remote_opens);
	}
	kfree(utf16_path);

	if (is_replayable_error(rc) &&
	    smb2_should_replay(tcon, &retries, &cur_sleep))
		goto replay_again;

	return rc;
}

int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
			      struct dentry *dentry,
			      struct cached_fid **ret_cfid)
{
	struct cached_fid *cfid;
	struct cached_fids *cfids = tcon->cfids;

	if (cfids == NULL)
		return -EOPNOTSUPP;

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (dentry && cfid->dentry == dentry) {
			cifs_dbg(FYI, "found a cached file handle by dentry\n");
			kref_get(&cfid->refcount);
			*ret_cfid = cfid;
			spin_unlock(&cfids->cfid_list_lock);
			return 0;
		}
	}
	spin_unlock(&cfids->cfid_list_lock);
	return -ENOENT;
}
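/*
 * kref release callback for a cached_fid: unhook it from the list, drop
 * the cached dentry, close the handle on the server if it is still open,
 * and free the structure.
 */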
static void
smb2_close_cached_fid(struct kref *ref)
{
	struct cached_fid *cfid = container_of(ref, struct cached_fid,
					       refcount);
	int rc;

	spin_lock(&cfid->cfids->cfid_list_lock);
	if (cfid->on_list) {
		list_del(&cfid->entry);
		cfid->on_list = false;
		cfid->cfids->num_entries--;
	}
	spin_unlock(&cfid->cfids->cfid_list_lock);

	dput(cfid->dentry);
	cfid->dentry = NULL;

	if (cfid->is_open) {
		rc = SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
				cfid->fid.volatile_fid);
		if (rc) /* should we retry on -EBUSY or -EAGAIN? */
			cifs_dbg(VFS, "close cached dir rc %d\n", rc);
	}

	free_cached_dir(cfid);
}

void drop_cached_dir_by_name(const unsigned int xid, struct cifs_tcon *tcon,
			     const char *name, struct cifs_sb_info *cifs_sb)
{
	struct cached_fid *cfid = NULL;
	int rc;

	rc = open_cached_dir(xid, tcon, name, cifs_sb, true, &cfid);
	if (rc) {
		cifs_dbg(FYI, "no cached dir found for rmdir(%s)\n", name);
		return;
	}
	spin_lock(&cfid->cfids->cfid_list_lock);
	if (cfid->has_lease) {
		cfid->has_lease = false;
		kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
	spin_unlock(&cfid->cfids->cfid_list_lock);
	close_cached_dir(cfid);
}

void close_cached_dir(struct cached_fid *cfid)
{
	kref_put(&cfid->refcount, smb2_close_cached_fid);
}

/*
 * Called from cifs_kill_sb when we unmount a share
 */
void close_all_cached_dirs(struct cifs_sb_info *cifs_sb)
{
	struct rb_root *root = &cifs_sb->tlink_tree;
	struct rb_node *node;
	struct cached_fid *cfid;
	struct cifs_tcon *tcon;
	struct tcon_link *tlink;
	struct cached_fids *cfids;
	struct cached_dir_dentry *tmp_list, *q;
	LIST_HEAD(entry);

	spin_lock(&cifs_sb->tlink_tree_lock);
	for (node = rb_first(root); node; node = rb_next(node)) {
		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
		tcon = tlink_tcon(tlink);
		if (IS_ERR(tcon))
			continue;
		cfids = tcon->cfids;
		if (cfids == NULL)
			continue;
		spin_lock(&cfids->cfid_list_lock);
		list_for_each_entry(cfid, &cfids->entries, entry) {
			tmp_list = kmalloc(sizeof(*tmp_list), GFP_ATOMIC);
			if (tmp_list == NULL) {
				/*
				 * If the kmalloc() fails, we won't drop all
				 * dentries, and unmounting is likely to trigger
				 * a 'Dentry still in use' error.
				 */
				cifs_tcon_dbg(VFS, "Out of memory while dropping dentries\n");
				spin_unlock(&cfids->cfid_list_lock);
				spin_unlock(&cifs_sb->tlink_tree_lock);
				goto done;
			}
			spin_lock(&cfid->fid_lock);
			tmp_list->dentry = cfid->dentry;
			cfid->dentry = NULL;
			spin_unlock(&cfid->fid_lock);

			list_add_tail(&tmp_list->entry, &entry);
		}
		spin_unlock(&cfids->cfid_list_lock);
	}
	spin_unlock(&cifs_sb->tlink_tree_lock);

done:
	list_for_each_entry_safe(tmp_list, q, &entry, entry) {
		list_del(&tmp_list->entry);
		dput(tmp_list->dentry);
		kfree(tmp_list);
	}

	/* Flush any pending work that will drop dentries */
	flush_workqueue(cfid_put_wq);
}
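/*
 * Illustrative usage sketch of the cache (hypothetical caller):
 *
 *	struct cached_fid *cfid;
 *
 *	if (!open_cached_dir(xid, tcon, path, cifs_sb, false, &cfid)) {
 *		...use cfid->fid for operations on the directory...
 *		close_cached_dir(cfid);
 *	}
 *
 * A successful open transfers exactly one reference to the caller, which
 * must be dropped with close_cached_dir().
 */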
/*
 * Invalidate all cached dirs when a TCON has been reset
 * due to a session loss.
 */
void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
{
	struct cached_fids *cfids = tcon->cfids;
	struct cached_fid *cfid, *q;

	if (cfids == NULL)
		return;

	/*
	 * Mark all the cfids as closed, and move them to the cfids->dying list.
	 * They'll be cleaned up later by cfids_invalidation_worker. Take
	 * a reference to each cfid during this process.
	 */
	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
		list_move(&cfid->entry, &cfids->dying);
		cfids->num_entries--;
		cfid->is_open = false;
		cfid->on_list = false;
		if (cfid->has_lease) {
			/*
			 * The lease was never cancelled from the server,
			 * so steal that reference.
			 */
			cfid->has_lease = false;
		} else
			kref_get(&cfid->refcount);
	}
	/*
	 * Queue dropping of the dentries once locks have been dropped
	 */
	if (!list_empty(&cfids->dying))
		queue_work(cfid_put_wq, &cfids->invalidation_work);
	spin_unlock(&cfids->cfid_list_lock);
}

static void
cached_dir_offload_close(struct work_struct *work)
{
	struct cached_fid *cfid = container_of(work,
					       struct cached_fid, close_work);
	struct cifs_tcon *tcon = cfid->tcon;

	WARN_ON(cfid->on_list);

	kref_put(&cfid->refcount, smb2_close_cached_fid);
	cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_cached_close);
}

/*
 * Release the cached directory's dentry, and then queue work to drop the
 * cached directory itself (closing it on the server if needed).
 *
 * Must be called with a reference to the cached_fid and a reference to the
 * tcon.
 */
static void cached_dir_put_work(struct work_struct *work)
{
	struct cached_fid *cfid = container_of(work, struct cached_fid,
					       put_work);
	struct dentry *dentry;

	spin_lock(&cfid->fid_lock);
	dentry = cfid->dentry;
	cfid->dentry = NULL;
	spin_unlock(&cfid->fid_lock);

	dput(dentry);
	queue_work(serverclose_wq, &cfid->close_work);
}
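/*
 * Match a server lease break against the cached directory handles.
 * Returns true if @lease_key belonged to a cached directory, in which
 * case the cfid is unhooked from the list and its release is deferred
 * to the put/close work queues.
 */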
int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16])
{
	struct cached_fids *cfids = tcon->cfids;
	struct cached_fid *cfid;

	if (cfids == NULL)
		return false;

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry(cfid, &cfids->entries, entry) {
		if (cfid->has_lease &&
		    !memcmp(lease_key,
			    cfid->fid.lease_key,
			    SMB2_LEASE_KEY_SIZE)) {
			cfid->has_lease = false;
			cfid->time = 0;
			/*
			 * We found a matching lease; remove the cfid from
			 * the list so no other threads can access it.
			 */
			list_del(&cfid->entry);
			cfid->on_list = false;
			cfids->num_entries--;

			++tcon->tc_count;
			trace_smb3_tcon_ref(tcon->debug_id, tcon->tc_count,
					    netfs_trace_tcon_ref_get_cached_lease_break);
			queue_work(cfid_put_wq, &cfid->put_work);
			spin_unlock(&cfids->cfid_list_lock);
			return true;
		}
	}
	spin_unlock(&cfids->cfid_list_lock);
	return false;
}

static struct cached_fid *init_cached_dir(const char *path)
{
	struct cached_fid *cfid;

	cfid = kzalloc(sizeof(*cfid), GFP_ATOMIC);
	if (!cfid)
		return NULL;
	cfid->path = kstrdup(path, GFP_ATOMIC);
	if (!cfid->path) {
		kfree(cfid);
		return NULL;
	}

	INIT_WORK(&cfid->close_work, cached_dir_offload_close);
	INIT_WORK(&cfid->put_work, cached_dir_put_work);
	INIT_LIST_HEAD(&cfid->entry);
	INIT_LIST_HEAD(&cfid->dirents.entries);
	mutex_init(&cfid->dirents.de_mutex);
	spin_lock_init(&cfid->fid_lock);
	kref_init(&cfid->refcount);
	return cfid;
}

static void free_cached_dir(struct cached_fid *cfid)
{
	struct cached_dirent *dirent, *q;

	WARN_ON(work_pending(&cfid->close_work));
	WARN_ON(work_pending(&cfid->put_work));

	dput(cfid->dentry);
	cfid->dentry = NULL;

	/*
	 * Delete all cached dirent names
	 */
	list_for_each_entry_safe(dirent, q, &cfid->dirents.entries, entry) {
		list_del(&dirent->entry);
		kfree(dirent->name);
		kfree(dirent);
	}

	kfree(cfid->path);
	cfid->path = NULL;
	kfree(cfid);
}

static void cfids_invalidation_worker(struct work_struct *work)
{
	struct cached_fids *cfids = container_of(work, struct cached_fids,
						 invalidation_work);
	struct cached_fid *cfid, *q;
	LIST_HEAD(entry);

	spin_lock(&cfids->cfid_list_lock);
	/* move cfids->dying to the local list */
	list_cut_before(&entry, &cfids->dying, &cfids->dying);
	spin_unlock(&cfids->cfid_list_lock);

	list_for_each_entry_safe(cfid, q, &entry, entry) {
		list_del(&cfid->entry);
		/* Drop the ref-count acquired in invalidate_all_cached_dirs */
		kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
}
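/*
 * Delayed worker that expires cached directory handles which have been
 * valid for longer than dir_cache_timeout seconds, then requeues itself.
 */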
static void cfids_laundromat_worker(struct work_struct *work)
{
	struct cached_fids *cfids;
	struct cached_fid *cfid, *q;
	struct dentry *dentry;
	LIST_HEAD(entry);

	cfids = container_of(work, struct cached_fids, laundromat_work.work);

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
		if (cfid->time &&
		    time_after(jiffies, cfid->time + HZ * dir_cache_timeout)) {
			cfid->on_list = false;
			list_move(&cfid->entry, &entry);
			cfids->num_entries--;
			if (cfid->has_lease) {
				/*
				 * Our lease has not yet been cancelled from
				 * the server. Steal that reference.
				 */
				cfid->has_lease = false;
			} else
				kref_get(&cfid->refcount);
		}
	}
	spin_unlock(&cfids->cfid_list_lock);

	list_for_each_entry_safe(cfid, q, &entry, entry) {
		list_del(&cfid->entry);

		spin_lock(&cfid->fid_lock);
		dentry = cfid->dentry;
		cfid->dentry = NULL;
		spin_unlock(&cfid->fid_lock);

		dput(dentry);
		if (cfid->is_open) {
			spin_lock(&cifs_tcp_ses_lock);
			++cfid->tcon->tc_count;
			trace_smb3_tcon_ref(cfid->tcon->debug_id, cfid->tcon->tc_count,
					    netfs_trace_tcon_ref_get_cached_laundromat);
			spin_unlock(&cifs_tcp_ses_lock);
			queue_work(serverclose_wq, &cfid->close_work);
		} else
			/*
			 * Drop the ref-count from above, either the lease-ref
			 * (if there was one) or the extra one acquired.
			 */
			kref_put(&cfid->refcount, smb2_close_cached_fid);
	}
	queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
			   dir_cache_timeout * HZ);
}

struct cached_fids *init_cached_dirs(void)
{
	struct cached_fids *cfids;

	cfids = kzalloc(sizeof(*cfids), GFP_KERNEL);
	if (!cfids)
		return NULL;
	spin_lock_init(&cfids->cfid_list_lock);
	INIT_LIST_HEAD(&cfids->entries);
	INIT_LIST_HEAD(&cfids->dying);

	INIT_WORK(&cfids->invalidation_work, cfids_invalidation_worker);
	INIT_DELAYED_WORK(&cfids->laundromat_work, cfids_laundromat_worker);
	queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
			   dir_cache_timeout * HZ);

	return cfids;
}

/*
 * Called from tconInfoFree when we are tearing down the tcon.
 * There are no active users or open files/directories at this point.
 */
void free_cached_dirs(struct cached_fids *cfids)
{
	struct cached_fid *cfid, *q;
	LIST_HEAD(entry);

	if (cfids == NULL)
		return;

	cancel_delayed_work_sync(&cfids->laundromat_work);
	cancel_work_sync(&cfids->invalidation_work);

	spin_lock(&cfids->cfid_list_lock);
	list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
		cfid->on_list = false;
		cfid->is_open = false;
		list_move(&cfid->entry, &entry);
	}
	list_for_each_entry_safe(cfid, q, &cfids->dying, entry) {
		cfid->on_list = false;
		cfid->is_open = false;
		list_move(&cfid->entry, &entry);
	}
	spin_unlock(&cfids->cfid_list_lock);

	list_for_each_entry_safe(cfid, q, &entry, entry) {
		list_del(&cfid->entry);
		free_cached_dir(cfid);
	}

	kfree(cfids);
}
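/*
 * Lifecycle note: init_cached_dirs() presumably runs when the tcon is
 * allocated and arms the laundromat; free_cached_dirs() runs from
 * tconInfoFree() (see above) only after both workers have been
 * cancelled, so the final teardown cannot race with them.
 */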