Lines Matching +full:parent +full:- +full:locked (fs/dcache.c)

1 // SPDX-License-Identifier: GPL-2.0-only
6 * (C) 1997 Thomas Schoebel-Theuer,
13 * The dcache is a master of the icache - whenever a dcache entry
38 #include <asm/runtime-const.h>
42 * dcache->d_inode->i_lock protects:
43 * - i_dentry, d_u.d_alias, d_inode of aliases
45 * - the dcache hash table
47 * - the s_roots list (see __d_drop)
48 * dentry->d_sb->s_dentry_lru_lock protects:
49 * - the dcache lru lists and counters
51 * - d_flags
52 * - d_name
53 * - d_lru
54 * - d_count
55 * - d_unhashed()
56 * - d_parent and d_children
57 * - children's d_sib and d_parent
58 * - d_u.d_alias, d_inode
61 * dentry->d_inode->i_lock
62 * dentry->d_lock
63 * dentry->d_sb->s_dentry_lru_lock
68 * dentry->d_parent->...->d_parent->d_lock
70 * dentry->d_parent->d_lock
71 * dentry->d_lock
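A minimal sketch of the ordering rule above (an ancestor's ->d_lock is taken before a descendant's), in the same shape d_alloc() uses later in this file; the function name is illustrative:

static void example_lock_parent_child(struct dentry *parent,
				      struct dentry *dentry)
{
	/* parent first, then child, with a nesting annotation for lockdep */
	spin_lock(&parent->d_lock);
	spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
	/* ... both d_parent and d_name of "dentry" are stable here ... */
	spin_unlock(&dentry->d_lock);
	spin_unlock(&parent->d_lock);
}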
100 * to make this good - I've just made it work.
102 * This hash-function tries to avoid losing too many bits of hash
103 * information, yet avoid using a prime hash-size or similar.
124 static inline struct hlist_bl_head *in_lookup_hash(const struct dentry *parent, in in_lookup_hash() argument
127 hash += (unsigned long) parent / L1_CACHE_BYTES; in in_lookup_hash()
152 * Here we resort to our own counters instead of using generic per-cpu counters
202 .procname = "dentry-state",
209 .procname = "dentry-negative",
240 * Compare 2 name strings, return 0 if they match, otherwise non-zero.
241 * The strings are both count bytes long, and count is non-zero.
245 #include <asm/word-at-a-time.h>
268 tcount -= sizeof(unsigned long); in dentry_string_cmp()
285 tcount--; in dentry_string_cmp()
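For reference, the byte-at-a-time equivalent of this comparison (the generic !CONFIG_DCACHE_WORD_ACCESS variant of dentry_string_cmp() looks much like this); it returns 0 on match, non-zero otherwise:

static inline int example_string_cmp(const unsigned char *cs,
				     const unsigned char *ct, unsigned tcount)
{
	do {
		if (*cs != *ct)
			return 1;
		cs++;
		ct++;
		tcount--;
	} while (tcount);
	return 0;
}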
310 const unsigned char *cs = READ_ONCE(dentry->d_name.name); in dentry_cmp()
317 * Refcounted, freeing is RCU-delayed. See take_dentry_name_snapshot()
318 * for the reason why ->count and ->head can't be combined into a union.
319 * dentry_string_cmp() relies upon ->name[] being word-aligned.
329 return container_of(dentry->d_name.name, struct external_name, name[0]); in external_name()
348 return dentry->d_name.name != dentry->d_shortname.string; in dname_external()
358 seq = read_seqcount_begin(&dentry->d_seq); in take_dentry_name_snapshot()
359 s = READ_ONCE(dentry->d_name.name); in take_dentry_name_snapshot()
360 name->name.hash_len = dentry->d_name.hash_len; in take_dentry_name_snapshot()
361 name->name.name = name->inline_name.string; in take_dentry_name_snapshot()
362 if (likely(s == dentry->d_shortname.string)) { in take_dentry_name_snapshot()
363 name->inline_name = dentry->d_shortname; in take_dentry_name_snapshot()
368 if (unlikely(!atomic_inc_not_zero(&p->count))) in take_dentry_name_snapshot()
370 name->name.name = s; in take_dentry_name_snapshot()
372 if (read_seqcount_retry(&dentry->d_seq, seq)) { in take_dentry_name_snapshot()
382 if (unlikely(name->name.name != name->inline_name.string)) { in release_dentry_name_snapshot()
384 p = container_of(name->name.name, struct external_name, name[0]); in release_dentry_name_snapshot()
385 if (unlikely(atomic_dec_and_test(&p->count))) in release_dentry_name_snapshot()
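Typical pairing of the two snapshot helpers above: capture a stable copy of ->d_name without holding ->d_lock across the use, then drop it. A sketch; the caller is assumed to hold a reference on the dentry:

static void example_log_name(struct dentry *dentry)
{
	struct name_snapshot snap;

	take_dentry_name_snapshot(&snap, dentry);
	pr_info("name: %s\n", snap.name.name);	/* safe against renames */
	release_dentry_name_snapshot(&snap);
}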
397 dentry->d_inode = inode; in __d_set_inode_and_type()
398 flags = READ_ONCE(dentry->d_flags); in __d_set_inode_and_type()
401 smp_store_release(&dentry->d_flags, flags); in __d_set_inode_and_type()
406 unsigned flags = READ_ONCE(dentry->d_flags); in __d_clear_type_and_inode()
409 WRITE_ONCE(dentry->d_flags, flags); in __d_clear_type_and_inode()
410 dentry->d_inode = NULL; in __d_clear_type_and_inode()
421 WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias)); in dentry_free()
424 if (likely(atomic_dec_and_test(&p->count))) { in dentry_free()
425 call_rcu(&dentry->d_u.d_rcu, __d_free_external); in dentry_free()
430 if (dentry->d_flags & DCACHE_NORCU) in dentry_free()
431 __d_free(&dentry->d_u.d_rcu); in dentry_free()
433 call_rcu(&dentry->d_u.d_rcu, __d_free); in dentry_free()
441 __releases(dentry->d_lock) in dentry_unlink_inode()
442 __releases(dentry->d_inode->i_lock) in dentry_unlink_inode()
444 struct inode *inode = dentry->d_inode; in dentry_unlink_inode()
446 raw_write_seqcount_begin(&dentry->d_seq); in dentry_unlink_inode()
448 hlist_del_init(&dentry->d_u.d_alias); in dentry_unlink_inode()
449 raw_write_seqcount_end(&dentry->d_seq); in dentry_unlink_inode()
450 spin_unlock(&dentry->d_lock); in dentry_unlink_inode()
451 spin_unlock(&inode->i_lock); in dentry_unlink_inode()
452 if (!inode->i_nlink) in dentry_unlink_inode()
454 if (dentry->d_op && dentry->d_op->d_iput) in dentry_unlink_inode()
455 dentry->d_op->d_iput(dentry, inode); in dentry_unlink_inode()
462 * is in use - which includes both the "real" per-superblock
468 * The per-cpu "nr_dentry_unused" counters are updated with
471 * The per-cpu "nr_dentry_negative" counters are only updated
472 * when deleted from or added to the per-superblock LRU list, not
479 #define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
483 dentry->d_flags |= DCACHE_LRU_LIST; in d_lru_add()
488 &dentry->d_sb->s_dentry_lru, &dentry->d_lru)); in d_lru_add()
494 dentry->d_flags &= ~DCACHE_LRU_LIST; in d_lru_del()
499 &dentry->d_sb->s_dentry_lru, &dentry->d_lru)); in d_lru_del()
505 list_del_init(&dentry->d_lru); in d_shrink_del()
506 dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST); in d_shrink_del()
513 list_add(&dentry->d_lru, list); in d_shrink_add()
514 dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST; in d_shrink_add()
527 dentry->d_flags &= ~DCACHE_LRU_LIST; in d_lru_isolate()
531 list_lru_isolate(lru, &dentry->d_lru); in d_lru_isolate()
538 dentry->d_flags |= DCACHE_SHRINK_LIST; in d_lru_shrink_move()
541 list_lru_isolate_move(lru, &dentry->d_lru, list); in d_lru_shrink_move()
553 b = &dentry->d_sb->s_roots; in ___d_drop()
555 b = d_hash(dentry->d_name.hash); in ___d_drop()
558 __hlist_bl_del(&dentry->d_hash); in ___d_drop()
566 dentry->d_hash.pprev = NULL; in __d_drop()
567 write_seqcount_invalidate(&dentry->d_seq); in __d_drop()
573 * d_drop - drop a dentry
576 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
578 * deleting the dentry - d_delete will try to mark the dentry negative if
585 * __d_drop requires dentry->d_lock
588 * (dentry->d_hash.pprev will be LIST_POISON2, not NULL).
592 spin_lock(&dentry->d_lock); in d_drop()
594 spin_unlock(&dentry->d_lock); in d_drop()
605 dentry->d_flags |= DCACHE_DENTRY_KILLED; in dentry_unlist()
606 if (unlikely(hlist_unhashed(&dentry->d_sib))) in dentry_unlist()
608 __hlist_del(&dentry->d_sib); in dentry_unlist()
611 * a normal list member, it didn't matter - ->d_sib.next would've in dentry_unlist()
614 * Normally d_walk() doesn't care about cursors moving around - in dentry_unlist()
615 * ->d_lock on parent prevents that and since a cursor has no children in dentry_unlist()
616 * of its own, we get through it without ever unlocking the parent. in dentry_unlist()
617 * There is one exception, though - if we ascend from a child that in dentry_unlist()
619 * using the value left in its ->d_sib.next. And if _that_ in dentry_unlist()
621 * before d_walk() regains parent->d_lock, we'll end up skipping in dentry_unlist()
624 * Solution: make sure that the pointer left behind in ->d_sib.next in dentry_unlist()
628 while (dentry->d_sib.next) { in dentry_unlist()
629 next = hlist_entry(dentry->d_sib.next, struct dentry, d_sib); in dentry_unlist()
630 if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR))) in dentry_unlist()
632 dentry->d_sib.next = next->d_sib.next; in dentry_unlist()
638 struct dentry *parent = NULL; in __dentry_kill() local
644 lockref_mark_dead(&dentry->d_lockref); in __dentry_kill()
650 if (dentry->d_flags & DCACHE_OP_PRUNE) in __dentry_kill()
651 dentry->d_op->d_prune(dentry); in __dentry_kill()
653 if (dentry->d_flags & DCACHE_LRU_LIST) { in __dentry_kill()
654 if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) in __dentry_kill()
659 if (dentry->d_inode) in __dentry_kill()
662 spin_unlock(&dentry->d_lock); in __dentry_kill()
664 if (dentry->d_op && dentry->d_op->d_release) in __dentry_kill()
665 dentry->d_op->d_release(dentry); in __dentry_kill()
668 /* now that it's negative, ->d_parent is stable */ in __dentry_kill()
670 parent = dentry->d_parent; in __dentry_kill()
671 spin_lock(&parent->d_lock); in __dentry_kill()
673 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); in __dentry_kill()
675 if (dentry->d_flags & DCACHE_SHRINK_LIST) in __dentry_kill()
677 spin_unlock(&dentry->d_lock); in __dentry_kill()
680 if (parent && --parent->d_lockref.count) { in __dentry_kill()
681 spin_unlock(&parent->d_lock); in __dentry_kill()
684 return parent; in __dentry_kill()
689 * Called under rcu_read_lock() and dentry->d_lock; the former
695 * that dentry's inode locked.
700 struct inode *inode = dentry->d_inode; in lock_for_kill()
702 if (unlikely(dentry->d_lockref.count)) in lock_for_kill()
705 if (!inode || likely(spin_trylock(&inode->i_lock))) in lock_for_kill()
709 spin_unlock(&dentry->d_lock); in lock_for_kill()
710 spin_lock(&inode->i_lock); in lock_for_kill()
711 spin_lock(&dentry->d_lock); in lock_for_kill()
712 if (likely(inode == dentry->d_inode)) in lock_for_kill()
714 spin_unlock(&inode->i_lock); in lock_for_kill()
715 inode = dentry->d_inode; in lock_for_kill()
717 if (likely(!dentry->d_lockref.count)) in lock_for_kill()
720 spin_unlock(&inode->i_lock); in lock_for_kill()
726 * locked; if not locked, we are more limited and might not be able to tell
727 * without a lock. False in this case means "punt to locked path and recheck".
729 * In case we aren't locked, these predicates are not "stable". However, it is
732 * re-gotten a reference to the dentry and change that, but our work is done -
735 static inline bool retain_dentry(struct dentry *dentry, bool locked) in retain_dentry() argument
740 d_flags = READ_ONCE(dentry->d_flags); in retain_dentry()
750 // ->d_delete() might tell us not to bother, but that requires in retain_dentry()
751 // ->d_lock; can't decide without it in retain_dentry()
753 if (!locked || dentry->d_op->d_delete(dentry)) in retain_dentry()
762 // need to do something - put it on LRU if it wasn't there already in retain_dentry()
764 // Unfortunately, both actions require ->d_lock, so in lockless in retain_dentry()
767 if (!locked) in retain_dentry()
771 if (!locked) in retain_dentry()
773 dentry->d_flags |= DCACHE_REFERENCED; in retain_dentry()
782 spin_lock(&inode->i_lock); in d_mark_dontcache()
783 hlist_for_each_entry(de, &inode->i_dentry, d_u.d_alias) { in d_mark_dontcache()
784 spin_lock(&de->d_lock); in d_mark_dontcache()
785 de->d_flags |= DCACHE_DONTCACHE; in d_mark_dontcache()
786 spin_unlock(&de->d_lock); in d_mark_dontcache()
788 inode->i_state |= I_DONTCACHE; in d_mark_dontcache()
789 spin_unlock(&inode->i_lock); in d_mark_dontcache()
810 ret = lockref_put_return(&dentry->d_lockref); in fast_dput()
818 spin_lock(&dentry->d_lock); in fast_dput()
819 if (WARN_ON_ONCE(dentry->d_lockref.count <= 0)) { in fast_dput()
820 spin_unlock(&dentry->d_lock); in fast_dput()
823 dentry->d_lockref.count--; in fast_dput()
824 goto locked; in fast_dput()
835 * taking the lock? There's a very common case when it's all we need - in fast_dput()
845 * but we'll need to re-check the situation after getting the lock. in fast_dput()
847 spin_lock(&dentry->d_lock); in fast_dput()
855 locked: in fast_dput()
856 if (dentry->d_lockref.count || retain_dentry(dentry, true)) { in fast_dput()
857 spin_unlock(&dentry->d_lock); in fast_dput()
882 * dput - release a dentry
887 * releasing its resources. If the parent dentries were scheduled for release
906 spin_unlock(&dentry->d_lock); in dput()
912 spin_unlock(&dentry->d_lock); in dput()
917 __must_hold(&dentry->d_lock) in to_shrink_list()
919 if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) { in to_shrink_list()
920 if (dentry->d_flags & DCACHE_LRU_LIST) in to_shrink_list()
935 spin_unlock(&dentry->d_lock); in dput_to_list()
945 * Do optimistic parent lookup without any in dget_parent()
949 seq = raw_seqcount_begin(&dentry->d_seq); in dget_parent()
950 ret = READ_ONCE(dentry->d_parent); in dget_parent()
951 gotref = lockref_get_not_zero(&ret->d_lockref); in dget_parent()
954 if (!read_seqcount_retry(&dentry->d_seq, seq)) in dget_parent()
961 * Don't need rcu_dereference because we re-check it was correct under in dget_parent()
965 ret = dentry->d_parent; in dget_parent()
966 spin_lock(&ret->d_lock); in dget_parent()
967 if (unlikely(ret != dentry->d_parent)) { in dget_parent()
968 spin_unlock(&ret->d_lock); in dget_parent()
973 BUG_ON(!ret->d_lockref.count); in dget_parent()
974 ret->d_lockref.count++; in dget_parent()
975 spin_unlock(&ret->d_lock); in dget_parent()
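Caller-side sketch: dget_parent() returns a referenced parent that stays valid across concurrent renames; it must be balanced with dput():

static void example_use_parent(struct dentry *dentry)
{
	struct dentry *parent = dget_parent(dentry);

	/* ... parent cannot be freed or reused here ... */
	dput(parent);
}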
984 if (hlist_empty(&inode->i_dentry)) in __d_find_any_alias()
986 alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias); in __d_find_any_alias()
987 lockref_get(&alias->d_lockref); in __d_find_any_alias()
992 * d_find_any_alias - find any alias for a given inode
1002 spin_lock(&inode->i_lock); in d_find_any_alias()
1004 spin_unlock(&inode->i_lock); in d_find_any_alias()
1013 if (S_ISDIR(inode->i_mode)) in __d_find_alias()
1016 hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) { in __d_find_alias()
1017 spin_lock(&alias->d_lock); in __d_find_alias()
1020 spin_unlock(&alias->d_lock); in __d_find_alias()
1023 spin_unlock(&alias->d_lock); in __d_find_alias()
1029 * d_find_alias - grab a hashed alias of inode
1046 if (!hlist_empty(&inode->i_dentry)) { in d_find_alias()
1047 spin_lock(&inode->i_lock); in d_find_alias()
1049 spin_unlock(&inode->i_lock); in d_find_alias()
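Hedged usage sketch for the alias lookups above, e.g. when mapping an inode back to a name for reporting purposes; d_find_alias() returns a referenced dentry or NULL:

static void example_report_alias(struct inode *inode)
{
	struct dentry *alias = d_find_alias(inode);

	if (alias) {
		/* ... use the hashed alias ... */
		dput(alias);
	}
}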
1061 struct hlist_head *l = &inode->i_dentry; in d_find_alias_rcu()
1064 spin_lock(&inode->i_lock); in d_find_alias_rcu()
1065 // ->i_dentry and ->i_rcu are colocated, but the latter won't be in d_find_alias_rcu()
1067 if (likely(!(inode->i_state & I_FREEING) && !hlist_empty(l))) { in d_find_alias_rcu()
1068 if (S_ISDIR(inode->i_mode)) { in d_find_alias_rcu()
1069 de = hlist_entry(l->first, struct dentry, d_u.d_alias); in d_find_alias_rcu()
1076 spin_unlock(&inode->i_lock); in d_find_alias_rcu()
1089 spin_lock(&inode->i_lock); in d_prune_aliases()
1090 hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) { in d_prune_aliases()
1091 spin_lock(&dentry->d_lock); in d_prune_aliases()
1092 if (!dentry->d_lockref.count) in d_prune_aliases()
1094 spin_unlock(&dentry->d_lock); in d_prune_aliases()
1096 spin_unlock(&inode->i_lock); in d_prune_aliases()
1110 spin_unlock(&victim->d_lock); in shrink_kill()
1118 dentry = list_entry(list->prev, struct dentry, d_lru); in shrink_dentry_list()
1119 spin_lock(&dentry->d_lock); in shrink_dentry_list()
1125 can_free = dentry->d_flags & DCACHE_DENTRY_KILLED; in shrink_dentry_list()
1126 spin_unlock(&dentry->d_lock); in shrink_dentry_list()
1144 * we are inverting the lru lock/dentry->d_lock here, in dentry_lru_isolate()
1148 if (!spin_trylock(&dentry->d_lock)) in dentry_lru_isolate()
1156 if (dentry->d_lockref.count) { in dentry_lru_isolate()
1158 spin_unlock(&dentry->d_lock); in dentry_lru_isolate()
1162 if (dentry->d_flags & DCACHE_REFERENCED) { in dentry_lru_isolate()
1163 dentry->d_flags &= ~DCACHE_REFERENCED; in dentry_lru_isolate()
1164 spin_unlock(&dentry->d_lock); in dentry_lru_isolate()
1168 * this point, we've dropped the dentry->d_lock but keep the in dentry_lru_isolate()
1189 spin_unlock(&dentry->d_lock); in dentry_lru_isolate()
1195 * prune_dcache_sb - shrink the dcache
1199 * Attempt to shrink the superblock dcache LRU by @sc->nr_to_scan entries. This
1211 freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc, in prune_dcache_sb()
1224 * we are inverting the lru lock/dentry->d_lock here, in dentry_lru_isolate_shrink()
1228 if (!spin_trylock(&dentry->d_lock)) in dentry_lru_isolate_shrink()
1232 spin_unlock(&dentry->d_lock); in dentry_lru_isolate_shrink()
1239 * shrink_dcache_sb - shrink dcache for a superblock
1250 list_lru_walk(&sb->s_dentry_lru, in shrink_dcache_sb()
1253 } while (list_lru_count(&sb->s_dentry_lru) > 0); in shrink_dcache_sb()
1258 * enum d_walk_ret - action to take during tree walk
1272 * d_walk - walk the dentry tree
1273 * @parent: start of walk
1279 static void d_walk(struct dentry *parent, void *data, in d_walk() argument
1289 this_parent = parent; in d_walk()
1290 spin_lock(&this_parent->d_lock); in d_walk()
1307 if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR)) in d_walk()
1310 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); in d_walk()
1317 spin_unlock(&dentry->d_lock); in d_walk()
1323 spin_unlock(&dentry->d_lock); in d_walk()
1327 if (!hlist_empty(&dentry->d_children)) { in d_walk()
1328 spin_unlock(&this_parent->d_lock); in d_walk()
1329 spin_release(&dentry->d_lock.dep_map, _RET_IP_); in d_walk()
1331 spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_); in d_walk()
1334 spin_unlock(&dentry->d_lock); in d_walk()
1341 if (this_parent != parent) { in d_walk()
1343 this_parent = dentry->d_parent; in d_walk()
1345 spin_unlock(&dentry->d_lock); in d_walk()
1346 spin_lock(&this_parent->d_lock); in d_walk()
1348 /* might go back up the wrong parent if we have had a rename. */ in d_walk()
1353 if (likely(!(dentry->d_flags & DCACHE_DENTRY_KILLED))) { in d_walk()
1365 spin_unlock(&this_parent->d_lock); in d_walk()
1370 spin_unlock(&this_parent->d_lock); in d_walk()
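The shape of a d_walk() callback, modelled on the collectors later in this file (enum d_walk_ret and d_walk() are local to dcache.c, so this is purely illustrative); the return value steers the walk:

static enum d_walk_ret example_count_cb(void *data, struct dentry *dentry)
{
	unsigned long *count = data;

	(*count)++;			/* called with dentry->d_lock held */
	return D_WALK_CONTINUE;		/* or D_WALK_QUIT/NORETRY/SKIP */
}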
1387 struct path path = { .mnt = info->mnt, .dentry = dentry }; in path_check_mount()
1392 info->mounted = 1; in path_check_mount()
1399 * path_has_submounts - check for mounts over a dentry in the
1401 * @parent: path to check.
1403 * Return true if the parent or its subdirectories contain
1406 int path_has_submounts(const struct path *parent) in path_has_submounts() argument
1408 struct check_mount data = { .mnt = parent->mnt, .mounted = 0 }; in path_has_submounts()
1411 d_walk(parent->dentry, &data, path_check_mount); in path_has_submounts()
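Usage sketch: refusing an operation while anything is mounted below a directory; path_has_submounts() does the subtree walk via d_walk():

static int example_check_busy(const struct path *path)
{
	if (path_has_submounts(path))
		return -EBUSY;
	return 0;
}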
1429 int ret = -ENOENT; in d_set_mounted()
1431 for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) { in d_set_mounted()
1433 spin_lock(&p->d_lock); in d_set_mounted()
1435 spin_unlock(&p->d_lock); in d_set_mounted()
1438 spin_unlock(&p->d_lock); in d_set_mounted()
1440 spin_lock(&dentry->d_lock); in d_set_mounted()
1442 ret = -EBUSY; in d_set_mounted()
1444 dentry->d_flags |= DCACHE_MOUNTED; in d_set_mounted()
1448 spin_unlock(&dentry->d_lock); in d_set_mounted()
1455 * Search the dentry child list of the specified parent,
1458 * whenever the d_children list is non-empty and continue
1483 if (data->start == dentry) in select_collect()
1486 if (dentry->d_flags & DCACHE_SHRINK_LIST) { in select_collect()
1487 data->found++; in select_collect()
1488 } else if (!dentry->d_lockref.count) { in select_collect()
1489 to_shrink_list(dentry, &data->dispose); in select_collect()
1490 data->found++; in select_collect()
1491 } else if (dentry->d_lockref.count < 0) { in select_collect()
1492 data->found++; in select_collect()
1499 if (!list_empty(&data->dispose)) in select_collect()
1510 if (data->start == dentry) in select_collect2()
1513 if (!dentry->d_lockref.count) { in select_collect2()
1514 if (dentry->d_flags & DCACHE_SHRINK_LIST) { in select_collect2()
1516 data->victim = dentry; in select_collect2()
1519 to_shrink_list(dentry, &data->dispose); in select_collect2()
1526 if (!list_empty(&data->dispose)) in select_collect2()
1533 * shrink_dcache_parent - prune dcache
1534 * @parent: parent of entries to prune
1536 * Prune the dcache to remove unused children of the parent dentry.
1538 void shrink_dcache_parent(struct dentry *parent) in shrink_dcache_parent() argument
1541 struct select_data data = {.start = parent}; in shrink_dcache_parent()
1544 d_walk(parent, &data, select_collect); in shrink_dcache_parent()
1555 d_walk(parent, &data, select_collect2); in shrink_dcache_parent()
1557 spin_lock(&data.victim->d_lock); in shrink_dcache_parent()
1559 spin_unlock(&data.victim->d_lock); in shrink_dcache_parent()
1574 if (!hlist_empty(&dentry->d_children)) in umount_check()
1578 if (dentry == _data && dentry->d_lockref.count == 1) in umount_check()
1584 dentry->d_inode ? in umount_check()
1585 dentry->d_inode->i_ino : 0UL, in umount_check()
1587 dentry->d_lockref.count, in umount_check()
1588 dentry->d_sb->s_type->name, in umount_check()
1589 dentry->d_sb->s_id); in umount_check()
1608 rwsem_assert_held_write(&sb->s_umount); in shrink_dcache_for_umount()
1610 dentry = sb->s_root; in shrink_dcache_for_umount()
1611 sb->s_root = NULL; in shrink_dcache_for_umount()
1614 while (!hlist_bl_empty(&sb->s_roots)) { in shrink_dcache_for_umount()
1615 dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_roots), struct dentry, d_hash)); in shrink_dcache_for_umount()
1631 * d_invalidate - detach submounts, prune dcache, and drop
1637 spin_lock(&dentry->d_lock); in d_invalidate()
1639 spin_unlock(&dentry->d_lock); in d_invalidate()
1643 spin_unlock(&dentry->d_lock); in d_invalidate()
1646 if (!dentry->d_inode) in d_invalidate()
1666 * __d_alloc - allocate a dcache entry
1681 dentry = kmem_cache_alloc_lru(dentry_cache, &sb->s_dentry_lru, in __d_alloc()
1687 * We guarantee that the inline name is always NUL-terminated. in __d_alloc()
1692 dentry->d_shortname.string[DNAME_INLINE_LEN-1] = 0; in __d_alloc()
1695 dname = dentry->d_shortname.string; in __d_alloc()
1696 } else if (name->len > DNAME_INLINE_LEN-1) { in __d_alloc()
1698 struct external_name *p = kmalloc(size + name->len, in __d_alloc()
1705 atomic_set(&p->count, 1); in __d_alloc()
1706 dname = p->name; in __d_alloc()
1708 dname = dentry->d_shortname.string; in __d_alloc()
1711 dentry->d_name.len = name->len; in __d_alloc()
1712 dentry->d_name.hash = name->hash; in __d_alloc()
1713 memcpy(dname, name->name, name->len); in __d_alloc()
1714 dname[name->len] = 0; in __d_alloc()
1717 smp_store_release(&dentry->d_name.name, dname); /* ^^^ */ in __d_alloc()
1719 dentry->d_flags = 0; in __d_alloc()
1720 lockref_init(&dentry->d_lockref); in __d_alloc()
1721 seqcount_spinlock_init(&dentry->d_seq, &dentry->d_lock); in __d_alloc()
1722 dentry->d_inode = NULL; in __d_alloc()
1723 dentry->d_parent = dentry; in __d_alloc()
1724 dentry->d_sb = sb; in __d_alloc()
1725 dentry->d_op = NULL; in __d_alloc()
1726 dentry->d_fsdata = NULL; in __d_alloc()
1727 INIT_HLIST_BL_NODE(&dentry->d_hash); in __d_alloc()
1728 INIT_LIST_HEAD(&dentry->d_lru); in __d_alloc()
1729 INIT_HLIST_HEAD(&dentry->d_children); in __d_alloc()
1730 INIT_HLIST_NODE(&dentry->d_u.d_alias); in __d_alloc()
1731 INIT_HLIST_NODE(&dentry->d_sib); in __d_alloc()
1732 d_set_d_op(dentry, dentry->d_sb->s_d_op); in __d_alloc()
1734 if (dentry->d_op && dentry->d_op->d_init) { in __d_alloc()
1735 err = dentry->d_op->d_init(dentry); in __d_alloc()
1750 * d_alloc - allocate a dcache entry
1751 * @parent: parent of entry to allocate
1758 struct dentry *d_alloc(struct dentry * parent, const struct qstr *name) in d_alloc() argument
1760 struct dentry *dentry = __d_alloc(parent->d_sb, name); in d_alloc()
1763 spin_lock(&parent->d_lock); in d_alloc()
1768 dentry->d_parent = dget_dlock(parent); in d_alloc()
1769 hlist_add_head(&dentry->d_sib, &parent->d_children); in d_alloc()
1770 spin_unlock(&parent->d_lock); in d_alloc()
1782 struct dentry *d_alloc_cursor(struct dentry * parent) in d_alloc_cursor() argument
1784 struct dentry *dentry = d_alloc_anon(parent->d_sb); in d_alloc_cursor()
1786 dentry->d_flags |= DCACHE_DENTRY_CURSOR; in d_alloc_cursor()
1787 dentry->d_parent = dget(parent); in d_alloc_cursor()
1793 * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
1799 * This is used for pipes, sockets et al. - the stuff that should
1814 dentry->d_flags |= DCACHE_NORCU; in d_alloc_pseudo()
1815 if (!sb->s_d_op) in d_alloc_pseudo()
1821 struct dentry *d_alloc_name(struct dentry *parent, const char *name) in d_alloc_name() argument
1826 q.hash_len = hashlen_string(parent, name); in d_alloc_name()
1827 return d_alloc(parent, &q); in d_alloc_name()
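Allocation sketch tying d_alloc_name() to d_add(): create a named child under a parent and bind it to an inode. Error handling trimmed; names are illustrative:

static struct dentry *example_add_child(struct dentry *parent,
					const char *name, struct inode *inode)
{
	struct dentry *dentry = d_alloc_name(parent, name);

	if (!dentry)
		return ERR_PTR(-ENOMEM);
	d_add(dentry, inode);	/* instantiate and hash in one step */
	return dentry;
}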
1833 WARN_ON_ONCE(dentry->d_op); in d_set_d_op()
1834 WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH | in d_set_d_op()
1840 dentry->d_op = op; in d_set_d_op()
1843 if (op->d_hash) in d_set_d_op()
1844 dentry->d_flags |= DCACHE_OP_HASH; in d_set_d_op()
1845 if (op->d_compare) in d_set_d_op()
1846 dentry->d_flags |= DCACHE_OP_COMPARE; in d_set_d_op()
1847 if (op->d_revalidate) in d_set_d_op()
1848 dentry->d_flags |= DCACHE_OP_REVALIDATE; in d_set_d_op()
1849 if (op->d_weak_revalidate) in d_set_d_op()
1850 dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE; in d_set_d_op()
1851 if (op->d_delete) in d_set_d_op()
1852 dentry->d_flags |= DCACHE_OP_DELETE; in d_set_d_op()
1853 if (op->d_prune) in d_set_d_op()
1854 dentry->d_flags |= DCACHE_OP_PRUNE; in d_set_d_op()
1855 if (op->d_real) in d_set_d_op()
1856 dentry->d_flags |= DCACHE_OP_REAL; in d_set_d_op()
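The flag translation above means a filesystem only fills in the operations it needs, usually via sb->s_d_op before dentries are allocated; a minimal, hypothetical table:

static int example_d_delete(const struct dentry *dentry)
{
	return 1;	/* never cache: unhash on the final dput() */
}

static const struct dentry_operations example_dops = {
	.d_delete = example_d_delete,	/* d_set_d_op() maps this to DCACHE_OP_DELETE */
};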
1868 if (S_ISDIR(inode->i_mode)) { in d_flags_for_inode()
1870 if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) { in d_flags_for_inode()
1871 if (unlikely(!inode->i_op->lookup)) in d_flags_for_inode()
1874 inode->i_opflags |= IOP_LOOKUP; in d_flags_for_inode()
1879 if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) { in d_flags_for_inode()
1880 if (unlikely(inode->i_op->get_link)) { in d_flags_for_inode()
1884 inode->i_opflags |= IOP_NOFOLLOW; in d_flags_for_inode()
1887 if (unlikely(!S_ISREG(inode->i_mode))) in d_flags_for_inode()
1901 spin_lock(&dentry->d_lock); in __d_instantiate()
1906 if ((dentry->d_flags & in __d_instantiate()
1909 hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry); in __d_instantiate()
1910 raw_write_seqcount_begin(&dentry->d_seq); in __d_instantiate()
1912 raw_write_seqcount_end(&dentry->d_seq); in __d_instantiate()
1914 spin_unlock(&dentry->d_lock); in __d_instantiate()
1918 * d_instantiate - fill in inode information for a dentry
1934 BUG_ON(!hlist_unhashed(&entry->d_u.d_alias)); in d_instantiate()
1937 spin_lock(&inode->i_lock); in d_instantiate()
1939 spin_unlock(&inode->i_lock); in d_instantiate()
1946 * with lockdep-related part of unlock_new_inode() done before
1947 * anything else. Use that instead of open-coding d_instantiate()/
1952 BUG_ON(!hlist_unhashed(&entry->d_u.d_alias)); in d_instantiate_new()
1956 spin_lock(&inode->i_lock); in d_instantiate_new()
1958 WARN_ON(!(inode->i_state & I_NEW)); in d_instantiate_new()
1959 inode->i_state &= ~I_NEW & ~I_CREATING; in d_instantiate_new()
1967 spin_unlock(&inode->i_lock); in d_instantiate_new()
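Creation-side sketch for d_instantiate_new() above: a filesystem's ->create allocates and fills the inode, then publishes it; example_new_inode() is a hypothetical helper standing in for the fs-specific inode allocation:

static int example_create(struct mnt_idmap *idmap, struct inode *dir,
			  struct dentry *dentry, umode_t mode, bool excl)
{
	struct inode *inode = example_new_inode(dir, mode); /* hypothetical */

	if (IS_ERR(inode))
		return PTR_ERR(inode);
	d_instantiate_new(dentry, inode);	/* also clears I_NEW, wakes waiters */
	return 0;
}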
1976 res = d_alloc_anon(root_inode->i_sb); in d_make_root()
1992 return ERR_PTR(-ESTALE); in __d_obtain_alias()
1996 sb = inode->i_sb; in __d_obtain_alias()
2004 res = ERR_PTR(-ENOMEM); in __d_obtain_alias()
2009 spin_lock(&inode->i_lock); in __d_obtain_alias()
2017 spin_lock(&new->d_lock); in __d_obtain_alias()
2019 hlist_add_head(&new->d_u.d_alias, &inode->i_dentry); in __d_obtain_alias()
2021 hlist_bl_lock(&sb->s_roots); in __d_obtain_alias()
2022 hlist_bl_add_head(&new->d_hash, &sb->s_roots); in __d_obtain_alias()
2023 hlist_bl_unlock(&sb->s_roots); in __d_obtain_alias()
2025 spin_unlock(&new->d_lock); in __d_obtain_alias()
2026 spin_unlock(&inode->i_lock); in __d_obtain_alias()
2027 inode = NULL; /* consumed by new->d_inode */ in __d_obtain_alias()
2030 spin_unlock(&inode->i_lock); in __d_obtain_alias()
2040 * d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode
2055 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
2064 * d_obtain_root - find or allocate a dentry for a given inode
2076 * replaced by ERR_PTR(-ESTALE).
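Export-ops sketch for d_obtain_alias(): an NFS-style filehandle decoder wants some dentry for an inode, reconnected or DISCONNECTED. The inode reference is consumed either way, and a NULL inode comes back as ERR_PTR(-ESTALE) per the comment above:

static struct dentry *example_fh_to_dentry(struct super_block *sb,
					   unsigned long ino)
{
	struct inode *inode = ilookup(sb, ino);	/* any referenced-inode source works */

	return d_obtain_alias(inode);	/* handles inode == NULL */
}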
2085 * d_add_ci - lookup or allocate new dentry with case-exact name
2086 * @dentry: the negative dentry that was passed to the parent's lookup func
2087 * @inode: the inode case-insensitive lookup has found
2088 * @name: the case-exact name to be associated with the returned dentry
2090 * This is to avoid filling the dcache with case-insensitive names to the
2092 * case-insensitive filesystems.
2094 * For a case-insensitive lookup match and if the case-exact dentry
2109 found = d_hash_and_lookup(dentry->d_parent, name); in d_add_ci()
2115 found = d_alloc_parallel(dentry->d_parent, name, in d_add_ci()
2116 dentry->d_wait); in d_add_ci()
2122 found = d_alloc(dentry->d_parent, name); in d_add_ci()
2125 return ERR_PTR(-ENOMEM); in d_add_ci()
2139 * d_same_name - compare dentry name with case-exact name
2140 * @dentry: the negative dentry that was passed to the parent's lookup func
2141 * @parent: parent dentry
2142 * @name: the case-exact name to be associated with the returned dentry
2146 bool d_same_name(const struct dentry *dentry, const struct dentry *parent, in d_same_name() argument
2149 if (likely(!(parent->d_flags & DCACHE_OP_COMPARE))) { in d_same_name()
2150 if (dentry->d_name.len != name->len) in d_same_name()
2152 return dentry_cmp(dentry, name->name, name->len) == 0; in d_same_name()
2154 return parent->d_op->d_compare(dentry, in d_same_name()
2155 dentry->d_name.len, dentry->d_name.name, in d_same_name()
2161 * This is __d_lookup_rcu() when the parent dentry has
2165 const struct dentry *parent, in __d_lookup_rcu_op_compare() argument
2169 u64 hashlen = name->hash_len; in __d_lookup_rcu_op_compare()
2180 seq = raw_seqcount_begin(&dentry->d_seq); in __d_lookup_rcu_op_compare()
2181 if (dentry->d_parent != parent) in __d_lookup_rcu_op_compare()
2185 if (dentry->d_name.hash != hashlen_hash(hashlen)) in __d_lookup_rcu_op_compare()
2187 tlen = dentry->d_name.len; in __d_lookup_rcu_op_compare()
2188 tname = dentry->d_name.name; in __d_lookup_rcu_op_compare()
2190 if (read_seqcount_retry(&dentry->d_seq, seq)) { in __d_lookup_rcu_op_compare()
2194 if (parent->d_op->d_compare(dentry, tlen, tname, name) != 0) in __d_lookup_rcu_op_compare()
2203 * __d_lookup_rcu - search for a dentry (racy, store-free)
2204 * @parent: parent dentry
2209 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
2210 * resolution (store-free path walking) design described in
2211 * Documentation/filesystems/path-lookup.txt.
2215 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
2221 * the returned dentry, so long as its parent's seqlock is checked after the
2228 struct dentry *__d_lookup_rcu(const struct dentry *parent, in __d_lookup_rcu() argument
2232 u64 hashlen = name->hash_len; in __d_lookup_rcu()
2233 const unsigned char *str = name->name; in __d_lookup_rcu()
2245 if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) in __d_lookup_rcu()
2246 return __d_lookup_rcu_op_compare(parent, name, seqp); in __d_lookup_rcu()
2256 * false-negative result. d_lookup() protects against concurrent in __d_lookup_rcu()
2259 * See Documentation/filesystems/path-lookup.txt for more details. in __d_lookup_rcu()
2266 * renames, and thus protects parent and name fields. in __d_lookup_rcu()
2279 * we are still guaranteed NUL-termination of ->d_name.name. in __d_lookup_rcu()
2281 seq = raw_seqcount_begin(&dentry->d_seq); in __d_lookup_rcu()
2282 if (dentry->d_parent != parent) in __d_lookup_rcu()
2286 if (dentry->d_name.hash_len != hashlen) in __d_lookup_rcu()
2297 * d_lookup - search for a dentry
2298 * @parent: parent dentry
2302 * d_lookup searches the children of the parent dentry for the name in
2307 struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name) in d_lookup() argument
2314 dentry = __d_lookup(parent, name); in d_lookup()
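Caller-side sketch: d_lookup() takes a reference on any dentry it returns, so every hit must be balanced with dput():

static void example_peek(struct dentry *dir, const struct qstr *name)
{
	struct dentry *child = d_lookup(dir, name);

	if (child) {
		/* ... inspect the child ... */
		dput(child);	/* drop the reference d_lookup() took */
	}
}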
2323 * __d_lookup - search for a dentry (racy)
2324 * @parent: parent dentry
2329 * false-negative result due to unrelated rename activity.
2337 struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name) in __d_lookup() argument
2339 unsigned int hash = name->hash; in __d_lookup()
2360 * false-negative result. d_lookup() protects against concurrent in __d_lookup()
2363 * See Documentation/filesystems/path-lookup.txt for more details. in __d_lookup()
2369 if (dentry->d_name.hash != hash) in __d_lookup()
2372 spin_lock(&dentry->d_lock); in __d_lookup()
2373 if (dentry->d_parent != parent) in __d_lookup()
2378 if (!d_same_name(dentry, parent, name)) in __d_lookup()
2381 dentry->d_lockref.count++; in __d_lookup()
2383 spin_unlock(&dentry->d_lock); in __d_lookup()
2386 spin_unlock(&dentry->d_lock); in __d_lookup()
2394 * d_hash_and_lookup - hash the qstr then search for a dentry
2398 * On lookup failure NULL is returned; on bad name - ERR_PTR(-error)
2403 * Check for a fs-specific hash function. Note that we must in d_hash_and_lookup()
2404 * calculate the standard hash first, as the d_op->d_hash() in d_hash_and_lookup()
2407 name->hash = full_name_hash(dir, name->name, name->len); in d_hash_and_lookup()
2408 if (dir->d_flags & DCACHE_OP_HASH) { in d_hash_and_lookup()
2409 int err = dir->d_op->d_hash(dir, name); in d_hash_and_lookup()
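Sketch of a d_hash_and_lookup() caller: build a qstr from a raw string and let the helper compute the hash (routing through ->d_hash() when the filesystem installed one) before searching:

static struct dentry *example_hash_lookup(struct dentry *dir, const char *s)
{
	struct qstr q = QSTR_INIT(s, strlen(s));

	return d_hash_and_lookup(dir, &q);	/* NULL, dentry, or ERR_PTR */
}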
2419 * - turn this dentry into a negative dentry
2420 * - unhash this dentry and free it.
2431 * d_delete - delete a dentry
2440 struct inode *inode = dentry->d_inode; in d_delete()
2442 spin_lock(&inode->i_lock); in d_delete()
2443 spin_lock(&dentry->d_lock); in d_delete()
2447 if (dentry->d_lockref.count == 1) { in d_delete()
2450 dentry->d_flags &= ~DCACHE_CANT_MOUNT; in d_delete()
2454 spin_unlock(&dentry->d_lock); in d_delete()
2455 spin_unlock(&inode->i_lock); in d_delete()
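Unlink-tail sketch contrasting with d_drop() earlier: after the name is gone on disk, d_delete() tries to keep the dentry around as a negative entry (a cheap future -ENOENT) and only unhashes when it cannot:

static void example_after_unlink(struct dentry *dentry)
{
	d_delete(dentry);	/* negative if we hold the only ref, else unhashed */
}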
2462 struct hlist_bl_head *b = d_hash(entry->d_name.hash); in __d_rehash()
2465 hlist_bl_add_head_rcu(&entry->d_hash, b); in __d_rehash()
2470 * d_rehash - add an entry back to the hash
2478 spin_lock(&entry->d_lock); in d_rehash()
2480 spin_unlock(&entry->d_lock); in d_rehash()
2488 unsigned n = dir->i_dir_seq; in start_dir_add()
2489 if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n) in start_dir_add()
2498 smp_store_release(&dir->i_dir_seq, n + 2); in end_dir_add()
2508 add_wait_queue(dentry->d_wait, &wait); in d_wait_lookup()
2511 spin_unlock(&dentry->d_lock); in d_wait_lookup()
2513 spin_lock(&dentry->d_lock); in d_wait_lookup()
2518 struct dentry *d_alloc_parallel(struct dentry *parent, in d_alloc_parallel() argument
2522 unsigned int hash = name->hash; in d_alloc_parallel()
2523 struct hlist_bl_head *b = in_lookup_hash(parent, hash); in d_alloc_parallel()
2525 struct dentry *new = d_alloc(parent, name); in d_alloc_parallel()
2530 return ERR_PTR(-ENOMEM); in d_alloc_parallel()
2534 seq = smp_load_acquire(&parent->d_inode->i_dir_seq); in d_alloc_parallel()
2536 dentry = __d_lookup_rcu(parent, name, &d_seq); in d_alloc_parallel()
2538 if (!lockref_get_not_dead(&dentry->d_lockref)) { in d_alloc_parallel()
2542 if (read_seqcount_retry(&dentry->d_seq, d_seq)) { in d_alloc_parallel()
2562 if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) { in d_alloc_parallel()
2568 * No changes for the parent since the beginning of d_lookup(). in d_alloc_parallel()
2570 * any potential in-lookup matches are going to stay here until in d_alloc_parallel()
2575 if (dentry->d_name.hash != hash) in d_alloc_parallel()
2577 if (dentry->d_parent != parent) in d_alloc_parallel()
2579 if (!d_same_name(dentry, parent, name)) in d_alloc_parallel()
2583 if (!lockref_get_not_dead(&dentry->d_lockref)) { in d_alloc_parallel()
2593 spin_lock(&dentry->d_lock); in d_alloc_parallel()
2596 * it's not in-lookup anymore; in principle we should repeat in d_alloc_parallel()
2601 if (unlikely(dentry->d_name.hash != hash)) in d_alloc_parallel()
2603 if (unlikely(dentry->d_parent != parent)) in d_alloc_parallel()
2607 if (unlikely(!d_same_name(dentry, parent, name))) in d_alloc_parallel()
2610 spin_unlock(&dentry->d_lock); in d_alloc_parallel()
2615 /* we can't take ->d_lock here; it's OK, though. */ in d_alloc_parallel()
2616 new->d_flags |= DCACHE_PAR_LOOKUP; in d_alloc_parallel()
2617 new->d_wait = wq; in d_alloc_parallel()
2618 hlist_bl_add_head(&new->d_u.d_in_lookup_hash, b); in d_alloc_parallel()
2622 spin_unlock(&dentry->d_lock); in d_alloc_parallel()
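Sketch of the in-lookup dance from a filesystem's lookup path when it uses d_alloc_parallel() directly, modelled on existing callers; the on-stack waitqueue is safe here because d_lookup_done() runs before return:

static struct dentry *example_par_lookup(struct dentry *parent,
					 const struct qstr *name)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	struct dentry *dentry = d_alloc_parallel(parent, name, &wq);

	if (IS_ERR(dentry) || !d_in_lookup(dentry))
		return dentry;	/* error, or someone else's dentry won */
	/* ... we own the in-lookup dentry: look it up and instantiate ... */
	d_lookup_done(dentry);	/* leave in-lookup state, wake waiters */
	return dentry;
}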
2629 * - Unhash the dentry
2630 * - Retrieve and clear the waitqueue head in dentry
2631 * - Return the waitqueue head
2638 lockdep_assert_held(&dentry->d_lock); in __d_lookup_unhash()
2640 b = in_lookup_hash(dentry->d_parent, dentry->d_name.hash); in __d_lookup_unhash()
2642 dentry->d_flags &= ~DCACHE_PAR_LOOKUP; in __d_lookup_unhash()
2643 __hlist_bl_del(&dentry->d_u.d_in_lookup_hash); in __d_lookup_unhash()
2644 d_wait = dentry->d_wait; in __d_lookup_unhash()
2645 dentry->d_wait = NULL; in __d_lookup_unhash()
2647 INIT_HLIST_NODE(&dentry->d_u.d_alias); in __d_lookup_unhash()
2648 INIT_LIST_HEAD(&dentry->d_lru); in __d_lookup_unhash()
2654 spin_lock(&dentry->d_lock); in __d_lookup_unhash_wake()
2656 spin_unlock(&dentry->d_lock); in __d_lookup_unhash_wake()
2660 /* inode->i_lock held if inode is non-NULL */
2667 spin_lock(&dentry->d_lock); in __d_add()
2669 dir = dentry->d_parent->d_inode; in __d_add()
2675 hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry); in __d_add()
2676 raw_write_seqcount_begin(&dentry->d_seq); in __d_add()
2678 raw_write_seqcount_end(&dentry->d_seq); in __d_add()
2684 spin_unlock(&dentry->d_lock); in __d_add()
2686 spin_unlock(&inode->i_lock); in __d_add()
2690 * d_add - add dentry to hash queues
2702 spin_lock(&inode->i_lock); in d_add()
2715 swap(target->d_name.name, dentry->d_name.name); in swap_names()
2721 dentry->d_name.name = target->d_name.name; in swap_names()
2722 target->d_shortname = dentry->d_shortname; in swap_names()
2723 target->d_name.name = target->d_shortname.string; in swap_names()
2731 target->d_name.name = dentry->d_name.name; in swap_names()
2732 dentry->d_shortname = target->d_shortname; in swap_names()
2733 dentry->d_name.name = dentry->d_shortname.string; in swap_names()
2739 swap(dentry->d_shortname.words[i], in swap_names()
2740 target->d_shortname.words[i]); in swap_names()
2743 swap(dentry->d_name.hash_len, target->d_name.hash_len); in swap_names()
2752 atomic_inc(&external_name(target)->count); in copy_name()
2753 dentry->d_name = target->d_name; in copy_name()
2755 dentry->d_shortname = target->d_shortname; in copy_name()
2756 dentry->d_name.name = dentry->d_shortname.string; in copy_name()
2757 dentry->d_name.hash_len = target->d_name.hash_len; in copy_name()
2759 if (old_name && likely(atomic_dec_and_test(&old_name->count))) in copy_name()
2764 * __d_move - move a dentry
2772 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
2782 WARN_ON(!dentry->d_inode); in __d_move()
2787 old_parent = dentry->d_parent; in __d_move()
2791 spin_lock(&target->d_parent->d_lock); in __d_move()
2793 /* target is not a descendant of dentry->d_parent */ in __d_move()
2794 spin_lock(&target->d_parent->d_lock); in __d_move()
2795 spin_lock_nested(&old_parent->d_lock, DENTRY_D_LOCK_NESTED); in __d_move()
2798 spin_lock(&old_parent->d_lock); in __d_move()
2800 spin_lock_nested(&target->d_parent->d_lock, in __d_move()
2803 spin_lock_nested(&dentry->d_lock, 2); in __d_move()
2804 spin_lock_nested(&target->d_lock, 3); in __d_move()
2807 dir = target->d_parent->d_inode; in __d_move()
2812 write_seqcount_begin(&dentry->d_seq); in __d_move()
2813 write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED); in __d_move()
2822 dentry->d_parent = target->d_parent; in __d_move()
2825 target->d_hash.pprev = NULL; in __d_move()
2826 dentry->d_parent->d_lockref.count++; in __d_move()
2828 WARN_ON(!--old_parent->d_lockref.count); in __d_move()
2830 target->d_parent = old_parent; in __d_move()
2832 if (!hlist_unhashed(&target->d_sib)) in __d_move()
2833 __hlist_del(&target->d_sib); in __d_move()
2834 hlist_add_head(&target->d_sib, &target->d_parent->d_children); in __d_move()
2838 if (!hlist_unhashed(&dentry->d_sib)) in __d_move()
2839 __hlist_del(&dentry->d_sib); in __d_move()
2840 hlist_add_head(&dentry->d_sib, &dentry->d_parent->d_children); in __d_move()
2845 write_seqcount_end(&target->d_seq); in __d_move()
2846 write_seqcount_end(&dentry->d_seq); in __d_move()
2851 if (dentry->d_parent != old_parent) in __d_move()
2852 spin_unlock(&dentry->d_parent->d_lock); in __d_move()
2854 spin_unlock(&old_parent->d_lock); in __d_move()
2855 spin_unlock(&target->d_lock); in __d_move()
2856 spin_unlock(&dentry->d_lock); in __d_move()
2860 * d_move - move a dentry
2877 * d_exchange - exchange two dentries
2885 WARN_ON(!dentry1->d_inode); in d_exchange()
2886 WARN_ON(!dentry2->d_inode); in d_exchange()
2896 * d_ancestor - search for an ancestor
2907 for (p = p2; !IS_ROOT(p); p = p->d_parent) { in d_ancestor()
2908 if (p->d_parent == p1) in d_ancestor()
2918 * dentry->d_parent->d_inode->i_mutex, and rename_lock
2927 int ret = -ESTALE; in __d_unalias()
2929 /* If alias and dentry share a parent, then no extra locks required */ in __d_unalias()
2930 if (alias->d_parent == dentry->d_parent) in __d_unalias()
2934 if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex)) in __d_unalias()
2936 m1 = &dentry->d_sb->s_vfs_rename_mutex; in __d_unalias()
2937 if (!inode_trylock_shared(alias->d_parent->d_inode)) in __d_unalias()
2939 m2 = &alias->d_parent->d_inode->i_rwsem; in __d_unalias()
2941 if (alias->d_op && alias->d_op->d_unalias_trylock && in __d_unalias()
2942 !alias->d_op->d_unalias_trylock(alias)) in __d_unalias()
2945 if (alias->d_op && alias->d_op->d_unalias_unlock) in __d_unalias()
2946 alias->d_op->d_unalias_unlock(alias); in __d_unalias()
2957 * d_splice_alias - splice a disconnected dentry into the tree if one exists
2965 * If a non-IS_ROOT directory is found, the filesystem is corrupt, and
2972 * is returned. This matches the expected return value of ->lookup.
2990 spin_lock(&inode->i_lock); in d_splice_alias()
2991 if (S_ISDIR(inode->i_mode)) { in d_splice_alias()
2995 spin_unlock(&inode->i_lock); in d_splice_alias()
3000 new = ERR_PTR(-ELOOP); in d_splice_alias()
3004 dentry->d_name.name, in d_splice_alias()
3005 inode->i_sb->s_type->name, in d_splice_alias()
3006 inode->i_sb->s_id); in d_splice_alias()
3008 struct dentry *old_parent = dget(new->d_parent); in d_splice_alias()
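The canonical ->lookup tail for an on-disk filesystem: whatever the inode search produced (inode, NULL, or error pointer), hand it to d_splice_alias() and return the result; example_iget_name() is a hypothetical fs-specific helper:

static struct dentry *example_lookup(struct inode *dir, struct dentry *dentry,
				     unsigned int flags)
{
	struct inode *inode = example_iget_name(dir, &dentry->d_name); /* hypothetical */

	return d_splice_alias(inode, dentry);	/* copes with NULL and IS_ERR() */
}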
3037 * is_subdir - is new dentry a subdirectory of old_dentry
3041 * Returns true if new_dentry is a subdirectory of the parent (at any depth).
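Usage sketch: rejecting a rename that would move a directory underneath its own subtree; is_subdir() handles the rename_lock/seqcount retries internally:

static int example_validate_move(struct dentry *source,
				 struct dentry *new_parent)
{
	if (is_subdir(new_parent, source))
		return -EINVAL;	/* new_parent lives inside source */
	return 0;
}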
3074 if (d_unhashed(dentry) || !dentry->d_inode) in d_genocide_kill()
3077 if (!(dentry->d_flags & DCACHE_GENOCIDE)) { in d_genocide_kill()
3078 dentry->d_flags |= DCACHE_GENOCIDE; in d_genocide_kill()
3079 dentry->d_lockref.count--; in d_genocide_kill()
3085 void d_genocide(struct dentry *parent) in d_genocide() argument
3087 d_walk(parent, parent, d_genocide_kill); in d_genocide()
3092 struct dentry *dentry = file->f_path.dentry; in d_mark_tmpfile()
3095 !hlist_unhashed(&dentry->d_u.d_alias) || in d_mark_tmpfile()
3097 spin_lock(&dentry->d_parent->d_lock); in d_mark_tmpfile()
3098 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); in d_mark_tmpfile()
3099 dentry->d_name.len = sprintf(dentry->d_shortname.string, "#%llu", in d_mark_tmpfile()
3100 (unsigned long long)inode->i_ino); in d_mark_tmpfile()
3101 spin_unlock(&dentry->d_lock); in d_mark_tmpfile()
3102 spin_unlock(&dentry->d_parent->d_lock); in d_mark_tmpfile()
3108 struct dentry *dentry = file->f_path.dentry; in d_tmpfile()
3117 * Obtain inode number of the parent dentry.
3121 struct dentry *parent; in d_parent_ino() local
3127 seq = raw_seqcount_begin(&dentry->d_seq); in d_parent_ino()
3128 parent = READ_ONCE(dentry->d_parent); in d_parent_ino()
3129 iparent = d_inode_rcu(parent); in d_parent_ino()
3131 ret = iparent->i_ino; in d_parent_ino()
3132 if (!read_seqcount_retry(&dentry->d_seq, seq)) in d_parent_ino()
3137 spin_lock(&dentry->d_lock); in d_parent_ino()
3138 ret = dentry->d_parent->d_inode->i_ino; in d_parent_ino()
3139 spin_unlock(&dentry->d_lock); in d_parent_ino()
3172 d_hash_shift = 32 - d_hash_shift; in dcache_init_early()
3203 d_hash_shift = 32 - d_hash_shift; in dcache_init()