Lines Matching +full:parent +full:- +full:locked

1 // SPDX-License-Identifier: GPL-2.0-only
6 * (C) 1997 Thomas Schoebel-Theuer,
13 * The dcache is a master of the icache - whenever a dcache entry
40 * dcache->d_inode->i_lock protects:
41 * - i_dentry, d_u.d_alias, d_inode of aliases
43 * - the dcache hash table
45 * - the s_roots list (see __d_drop)
46 * dentry->d_sb->s_dentry_lru_lock protects:
47 * - the dcache lru lists and counters
49 * - d_flags
50 * - d_name
51 * - d_lru
52 * - d_count
53 * - d_unhashed()
54 * - d_parent and d_children
55 * - children's d_sib and d_parent
56 * - d_u.d_alias, d_inode
59 * dentry->d_inode->i_lock
60 * dentry->d_lock
61 * dentry->d_sb->s_dentry_lru_lock
66 * dentry->d_parent->...->d_parent->d_lock
68 * dentry->d_parent->d_lock
69 * dentry->d_lock
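Read together, the two orderings above compose into one rule for teardown paths: take the inode lock before the dentry's own lock, and a parent's d_lock before a child's. A minimal sketch of respecting that order when detaching a dentry from its inode (assumes both objects are already pinned; this is an illustration, not a complete teardown):

    spin_lock(&inode->i_lock);          /* 1: inode lock first */
    spin_lock(&dentry->d_lock);         /* 2: then the dentry lock */
    /* ... unlink dentry from the inode's i_dentry alias list ... */
    spin_unlock(&dentry->d_lock);
    spin_unlock(&inode->i_lock);        /* release in reverse order */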
93 * to make this good - I've just made it work.
95 * This hash-function tries to avoid losing too many bits of hash
96 * information, yet avoid using a prime hash-size or similar.
111 static inline struct hlist_bl_head *in_lookup_hash(const struct dentry *parent, in in_lookup_hash() argument
114 hash += (unsigned long) parent / L1_CACHE_BYTES; in in_lookup_hash()
138 * Here we resort to our own counters instead of using generic per-cpu counters
188 .procname = "dentry-state",
205 * Compare 2 name strings, return 0 if they match, otherwise non-zero.
206 * The strings are both count bytes long, and count is non-zero.
210 #include <asm/word-at-a-time.h>
233 tcount -= sizeof(unsigned long); in dentry_string_cmp()
250 tcount--; in dentry_string_cmp()
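The loop above compares the names one unsigned long at a time (hence the word-at-a-time include just before it); on configurations without cheap word access the comparison degrades to a plain byte loop, roughly:

    static inline int dentry_string_cmp(const unsigned char *cs,
                                        const unsigned char *ct, unsigned tcount)
    {
            do {
                    if (*cs != *ct)
                            return 1;       /* non-zero: mismatch */
                    cs++;
                    ct++;
                    tcount--;
            } while (tcount);
            return 0;                       /* all tcount bytes matched */
    }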
275 const unsigned char *cs = READ_ONCE(dentry->d_name.name); in dentry_cmp()
290 return container_of(dentry->d_name.name, struct external_name, name[0]); in external_name()
309 return dentry->d_name.name != dentry->d_iname; in dname_external()
314 spin_lock(&dentry->d_lock); in take_dentry_name_snapshot()
315 name->name = dentry->d_name; in take_dentry_name_snapshot()
317 atomic_inc(&external_name(dentry)->u.count); in take_dentry_name_snapshot()
319 memcpy(name->inline_name, dentry->d_iname, in take_dentry_name_snapshot()
320 dentry->d_name.len + 1); in take_dentry_name_snapshot()
321 name->name.name = name->inline_name; in take_dentry_name_snapshot()
323 spin_unlock(&dentry->d_lock); in take_dentry_name_snapshot()
329 if (unlikely(name->name.name != name->inline_name)) { in release_dentry_name_snapshot()
331 p = container_of(name->name.name, struct external_name, name[0]); in release_dentry_name_snapshot()
332 if (unlikely(atomic_dec_and_test(&p->u.count))) in release_dentry_name_snapshot()
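The snapshot pair brackets any use of a name that must survive a concurrent rename: external names are pinned by bumping their refcount, inline names are copied. A usage sketch (consume_name() is a hypothetical consumer):

    struct name_snapshot snap;

    take_dentry_name_snapshot(&snap, dentry);
    /* snap.name remains valid even if dentry is renamed meanwhile */
    consume_name(snap.name.name, snap.name.len);    /* hypothetical */
    release_dentry_name_snapshot(&snap);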
344 dentry->d_inode = inode; in __d_set_inode_and_type()
345 flags = READ_ONCE(dentry->d_flags); in __d_set_inode_and_type()
348 smp_store_release(&dentry->d_flags, flags); in __d_set_inode_and_type()
353 unsigned flags = READ_ONCE(dentry->d_flags); in __d_clear_type_and_inode()
356 WRITE_ONCE(dentry->d_flags, flags); in __d_clear_type_and_inode()
357 dentry->d_inode = NULL; in __d_clear_type_and_inode()
358 if (dentry->d_flags & DCACHE_LRU_LIST) in __d_clear_type_and_inode()
364 WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias)); in dentry_free()
367 if (likely(atomic_dec_and_test(&p->u.count))) { in dentry_free()
368 call_rcu(&dentry->d_u.d_rcu, __d_free_external); in dentry_free()
373 if (dentry->d_flags & DCACHE_NORCU) in dentry_free()
374 __d_free(&dentry->d_u.d_rcu); in dentry_free()
376 call_rcu(&dentry->d_u.d_rcu, __d_free); in dentry_free()
384 __releases(dentry->d_lock) in dentry_unlink_inode()
385 __releases(dentry->d_inode->i_lock) in dentry_unlink_inode()
387 struct inode *inode = dentry->d_inode; in dentry_unlink_inode()
389 raw_write_seqcount_begin(&dentry->d_seq); in dentry_unlink_inode()
391 hlist_del_init(&dentry->d_u.d_alias); in dentry_unlink_inode()
392 raw_write_seqcount_end(&dentry->d_seq); in dentry_unlink_inode()
393 spin_unlock(&dentry->d_lock); in dentry_unlink_inode()
394 spin_unlock(&inode->i_lock); in dentry_unlink_inode()
395 if (!inode->i_nlink) in dentry_unlink_inode()
397 if (dentry->d_op && dentry->d_op->d_iput) in dentry_unlink_inode()
398 dentry->d_op->d_iput(dentry, inode); in dentry_unlink_inode()
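The tail of dentry_unlink_inode() then drops the inode reference, giving the filesystem the final say; completing the fragment above, the shape is roughly:

    if (dentry->d_op && dentry->d_op->d_iput)
            dentry->d_op->d_iput(dentry, inode);    /* fs-specific final put */
    else
            iput(inode);                            /* default: plain iput() */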
405 * is in use - which includes both the "real" per-superblock
411 * The per-cpu "nr_dentry_unused" counters are updated with
414 * The per-cpu "nr_dentry_negative" counters are only updated
415 * when deleted from or added to the per-superblock LRU list, not
422 #define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_…
426 dentry->d_flags |= DCACHE_LRU_LIST; in d_lru_add()
431 &dentry->d_sb->s_dentry_lru, &dentry->d_lru)); in d_lru_add()
437 dentry->d_flags &= ~DCACHE_LRU_LIST; in d_lru_del()
442 &dentry->d_sb->s_dentry_lru, &dentry->d_lru)); in d_lru_del()
448 list_del_init(&dentry->d_lru); in d_shrink_del()
449 dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST); in d_shrink_del()
456 list_add(&dentry->d_lru, list); in d_shrink_add()
457 dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST; in d_shrink_add()
470 dentry->d_flags &= ~DCACHE_LRU_LIST; in d_lru_isolate()
474 list_lru_isolate(lru, &dentry->d_lru); in d_lru_isolate()
481 dentry->d_flags |= DCACHE_SHRINK_LIST; in d_lru_shrink_move()
484 list_lru_isolate_move(lru, &dentry->d_lru, list); in d_lru_shrink_move()
496 b = &dentry->d_sb->s_roots; in ___d_drop()
498 b = d_hash(dentry->d_name.hash); in ___d_drop()
501 __hlist_bl_del(&dentry->d_hash); in ___d_drop()
509 dentry->d_hash.pprev = NULL; in __d_drop()
510 write_seqcount_invalidate(&dentry->d_seq); in __d_drop()
516 * d_drop - drop a dentry
519 * d_drop() unhashes the entry from the parent dentry hashes, so that it won't
521 * deleting the dentry - d_delete will try to mark the dentry negative if
528 * __d_drop requires dentry->d_lock
531 * (dentry->d_hash.pprev will be LIST_POISON2, not NULL).
535 spin_lock(&dentry->d_lock); in d_drop()
537 spin_unlock(&dentry->d_lock); in d_drop()
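Pieced together from the fragments, d_drop() is nothing but the locked wrapper the comment above demands:

    void d_drop(struct dentry *dentry)
    {
            spin_lock(&dentry->d_lock);
            __d_drop(dentry);
            spin_unlock(&dentry->d_lock);
    }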
548 dentry->d_flags |= DCACHE_DENTRY_KILLED; in dentry_unlist()
549 if (unlikely(hlist_unhashed(&dentry->d_sib))) in dentry_unlist()
551 __hlist_del(&dentry->d_sib); in dentry_unlist()
554 * a normal list member, it didn't matter - ->d_sib.next would've in dentry_unlist()
557 * Normally d_walk() doesn't care about cursors moving around - in dentry_unlist()
558 * ->d_lock on parent prevents that and since a cursor has no children in dentry_unlist()
559 * of its own, we get through it without ever unlocking the parent. in dentry_unlist()
560 * There is one exception, though - if we ascend from a child that in dentry_unlist()
562 * using the value left in its ->d_sib.next. And if _that_ in dentry_unlist()
564 * before d_walk() regains parent->d_lock, we'll end up skipping in dentry_unlist()
567 * Solution: make sure that the pointer left behind in ->d_sib.next in dentry_unlist()
571 while (dentry->d_sib.next) { in dentry_unlist()
572 next = hlist_entry(dentry->d_sib.next, struct dentry, d_sib); in dentry_unlist()
573 if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR))) in dentry_unlist()
575 dentry->d_sib.next = next->d_sib.next; in dentry_unlist()
581 struct dentry *parent = NULL; in __dentry_kill() local
587 lockref_mark_dead(&dentry->d_lockref); in __dentry_kill()
593 if (dentry->d_flags & DCACHE_OP_PRUNE) in __dentry_kill()
594 dentry->d_op->d_prune(dentry); in __dentry_kill()
596 if (dentry->d_flags & DCACHE_LRU_LIST) { in __dentry_kill()
597 if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) in __dentry_kill()
602 if (dentry->d_inode) in __dentry_kill()
605 spin_unlock(&dentry->d_lock); in __dentry_kill()
607 if (dentry->d_op && dentry->d_op->d_release) in __dentry_kill()
608 dentry->d_op->d_release(dentry); in __dentry_kill()
611 /* now that it's negative, ->d_parent is stable */ in __dentry_kill()
613 parent = dentry->d_parent; in __dentry_kill()
614 spin_lock(&parent->d_lock); in __dentry_kill()
616 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); in __dentry_kill()
618 if (dentry->d_flags & DCACHE_SHRINK_LIST) in __dentry_kill()
620 spin_unlock(&dentry->d_lock); in __dentry_kill()
623 if (parent && --parent->d_lockref.count) { in __dentry_kill()
624 spin_unlock(&parent->d_lock); in __dentry_kill()
627 return parent; in __dentry_kill()
632 * Called under rcu_read_lock() and dentry->d_lock; the former
638 * that dentry's inode locked.
643 struct inode *inode = dentry->d_inode; in lock_for_kill()
645 if (unlikely(dentry->d_lockref.count)) in lock_for_kill()
648 if (!inode || likely(spin_trylock(&inode->i_lock))) in lock_for_kill()
652 spin_unlock(&dentry->d_lock); in lock_for_kill()
653 spin_lock(&inode->i_lock); in lock_for_kill()
654 spin_lock(&dentry->d_lock); in lock_for_kill()
655 if (likely(inode == dentry->d_inode)) in lock_for_kill()
657 spin_unlock(&inode->i_lock); in lock_for_kill()
658 inode = dentry->d_inode; in lock_for_kill()
660 if (likely(!dentry->d_lockref.count)) in lock_for_kill()
663 spin_unlock(&inode->i_lock); in lock_for_kill()
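This is the standard trylock dance for acquiring two locks against their documented order: while holding d_lock, trylock i_lock; on failure drop d_lock, take both in the proper inode-first order, then re-validate everything that may have changed in the unlocked window. Distilled as a pattern sketch:

    /* holding dentry->d_lock, need inode->i_lock (the wrong way round) */
    if (!spin_trylock(&inode->i_lock)) {
            spin_unlock(&dentry->d_lock);
            spin_lock(&inode->i_lock);      /* documented order: inode first */
            spin_lock(&dentry->d_lock);
            /* recheck ->d_inode and ->d_lockref.count: both may have moved */
    }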
669 * locked; if not locked, we are more limited and might not be able to tell
670 * without a lock. False in this case means "punt to locked path and recheck".
672 * In case we aren't locked, these predicates are not "stable". However, it is
675 * re-gotten a reference to the dentry and change that, but our work is done -
678 static inline bool retain_dentry(struct dentry *dentry, bool locked) in retain_dentry() argument
683 d_flags = READ_ONCE(dentry->d_flags); in retain_dentry()
693 // ->d_delete() might tell us not to bother, but that requires in retain_dentry()
694 // ->d_lock; can't decide without it in retain_dentry()
696 if (!locked || dentry->d_op->d_delete(dentry)) in retain_dentry()
705 // need to do something - put it on LRU if it wasn't there already in retain_dentry()
707 // Unfortunately, both actions require ->d_lock, so in lockless in retain_dentry()
710 if (!locked) in retain_dentry()
714 if (!locked) in retain_dentry()
716 dentry->d_flags |= DCACHE_REFERENCED; in retain_dentry()
725 spin_lock(&inode->i_lock); in d_mark_dontcache()
726 hlist_for_each_entry(de, &inode->i_dentry, d_u.d_alias) { in d_mark_dontcache()
727 spin_lock(&de->d_lock); in d_mark_dontcache()
728 de->d_flags |= DCACHE_DONTCACHE; in d_mark_dontcache()
729 spin_unlock(&de->d_lock); in d_mark_dontcache()
731 inode->i_state |= I_DONTCACHE; in d_mark_dontcache()
732 spin_unlock(&inode->i_lock); in d_mark_dontcache()
753 ret = lockref_put_return(&dentry->d_lockref); in fast_dput()
761 spin_lock(&dentry->d_lock); in fast_dput()
762 if (WARN_ON_ONCE(dentry->d_lockref.count <= 0)) { in fast_dput()
763 spin_unlock(&dentry->d_lock); in fast_dput()
766 dentry->d_lockref.count--; in fast_dput()
767 goto locked; in fast_dput()
778 * taking the lock? There's a very common case when it's all we need - in fast_dput()
788 * but we'll need to re-check the situation after getting the lock. in fast_dput()
790 spin_lock(&dentry->d_lock); in fast_dput()
798 locked: in fast_dput()
799 if (dentry->d_lockref.count || retain_dentry(dentry, true)) { in fast_dput()
800 spin_unlock(&dentry->d_lock); in fast_dput()
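The fast path is built on lockref_put_return(), which decrements the count embedded next to the spinlock with a cmpxchg and returns the new count, or a negative value when the lockless update cannot be done (lock held, architecture unsupported, or the count would reach zero). A generic sketch of the pattern on a hypothetical refcounted object:

    struct myobj {                          /* hypothetical */
            struct lockref ref;
    };

    static void myobj_put(struct myobj *obj)
    {
            int ret = lockref_put_return(&obj->ref);

            if (ret > 0)
                    return;                 /* other holders remain */
            if (ret < 0) {                  /* refused: redo under the lock */
                    spin_lock(&obj->ref.lock);
                    ret = --obj->ref.count;
                    spin_unlock(&obj->ref.lock);
            }
            if (ret == 0)
                    myobj_free(obj);        /* hypothetical teardown */
    }

fast_dput() follows the same skeleton, except that the zero-count case consults retain_dentry() instead of freeing unconditionally.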
825 * dput - release a dentry
830 * releasing its resources. If the parent dentries were scheduled for release
849 spin_unlock(&dentry->d_lock); in dput()
855 spin_unlock(&dentry->d_lock); in dput()
860 __must_hold(&dentry->d_lock) in to_shrink_list()
862 if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) { in to_shrink_list()
863 if (dentry->d_flags & DCACHE_LRU_LIST) in to_shrink_list()
878 spin_unlock(&dentry->d_lock); in dput_to_list()
888 * Do optimistic parent lookup without any in dget_parent()
892 seq = raw_seqcount_begin(&dentry->d_seq); in dget_parent()
893 ret = READ_ONCE(dentry->d_parent); in dget_parent()
894 gotref = lockref_get_not_zero(&ret->d_lockref); in dget_parent()
897 if (!read_seqcount_retry(&dentry->d_seq, seq)) in dget_parent()
904 * Don't need rcu_dereference because we re-check it was correct under in dget_parent()
908 ret = dentry->d_parent; in dget_parent()
909 spin_lock(&ret->d_lock); in dget_parent()
910 if (unlikely(ret != dentry->d_parent)) { in dget_parent()
911 spin_unlock(&ret->d_lock); in dget_parent()
916 BUG_ON(!ret->d_lockref.count); in dget_parent()
917 ret->d_lockref.count++; in dget_parent()
918 spin_unlock(&ret->d_lock); in dget_parent()
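Assembled from the fragments, the optimistic path is: sample the seqcount, read ->d_parent, grab a reference with lockref_get_not_zero(), then confirm the seqcount did not move - success proves the parent pointer stayed stable while the reference was taken. Roughly:

    rcu_read_lock();
    seq = raw_seqcount_begin(&dentry->d_seq);
    ret = READ_ONCE(dentry->d_parent);
    gotref = lockref_get_not_zero(&ret->d_lockref);
    rcu_read_unlock();
    if (likely(gotref)) {
            if (!read_seqcount_retry(&dentry->d_seq, seq))
                    return ret;     /* no rename raced us: ref is good */
            dput(ret);              /* lost the race: undo and fall back */
    }

Only when this fails does the code fall back to the locked retry loop shown above.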
927 if (hlist_empty(&inode->i_dentry)) in __d_find_any_alias()
929 alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias); in __d_find_any_alias()
930 lockref_get(&alias->d_lockref); in __d_find_any_alias()
935 * d_find_any_alias - find any alias for a given inode
945 spin_lock(&inode->i_lock); in d_find_any_alias()
947 spin_unlock(&inode->i_lock); in d_find_any_alias()
956 if (S_ISDIR(inode->i_mode)) in __d_find_alias()
959 hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) { in __d_find_alias()
960 spin_lock(&alias->d_lock); in __d_find_alias()
963 spin_unlock(&alias->d_lock); in __d_find_alias()
966 spin_unlock(&alias->d_lock); in __d_find_alias()
972 * d_find_alias - grab a hashed alias of inode
989 if (!hlist_empty(&inode->i_dentry)) { in d_find_alias()
990 spin_lock(&inode->i_lock); in d_find_alias()
992 spin_unlock(&inode->i_lock); in d_find_alias()
1004 struct hlist_head *l = &inode->i_dentry; in d_find_alias_rcu()
1007 spin_lock(&inode->i_lock); in d_find_alias_rcu()
1008 // ->i_dentry and ->i_rcu are colocated, but the latter won't be in d_find_alias_rcu()
1010 if (likely(!(inode->i_state & I_FREEING) && !hlist_empty(l))) { in d_find_alias_rcu()
1011 if (S_ISDIR(inode->i_mode)) { in d_find_alias_rcu()
1012 de = hlist_entry(l->first, struct dentry, d_u.d_alias); in d_find_alias_rcu()
1019 spin_unlock(&inode->i_lock); in d_find_alias_rcu()
1032 spin_lock(&inode->i_lock); in d_prune_aliases()
1033 hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) { in d_prune_aliases()
1034 spin_lock(&dentry->d_lock); in d_prune_aliases()
1035 if (!dentry->d_lockref.count) in d_prune_aliases()
1037 spin_unlock(&dentry->d_lock); in d_prune_aliases()
1039 spin_unlock(&inode->i_lock); in d_prune_aliases()
1053 spin_unlock(&victim->d_lock); in shrink_kill()
1061 dentry = list_entry(list->prev, struct dentry, d_lru); in shrink_dentry_list()
1062 spin_lock(&dentry->d_lock); in shrink_dentry_list()
1068 can_free = dentry->d_flags & DCACHE_DENTRY_KILLED; in shrink_dentry_list()
1069 spin_unlock(&dentry->d_lock); in shrink_dentry_list()
1087 * we are inverting the lru lock/dentry->d_lock here, in dentry_lru_isolate()
1091 if (!spin_trylock(&dentry->d_lock)) in dentry_lru_isolate()
1099 if (dentry->d_lockref.count) { in dentry_lru_isolate()
1101 spin_unlock(&dentry->d_lock); in dentry_lru_isolate()
1105 if (dentry->d_flags & DCACHE_REFERENCED) { in dentry_lru_isolate()
1106 dentry->d_flags &= ~DCACHE_REFERENCED; in dentry_lru_isolate()
1107 spin_unlock(&dentry->d_lock); in dentry_lru_isolate()
1111 * this point, we've dropped the dentry->d_lock but keep the in dentry_lru_isolate()
1132 spin_unlock(&dentry->d_lock); in dentry_lru_isolate()
1138 * prune_dcache_sb - shrink the dcache
1142 * Attempt to shrink the superblock dcache LRU by @sc->nr_to_scan entries. This
1154 freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc, in prune_dcache_sb()
1167 * we are inverting the lru lock/dentry->d_lock here, in dentry_lru_isolate_shrink()
1171 if (!spin_trylock(&dentry->d_lock)) in dentry_lru_isolate_shrink()
1175 spin_unlock(&dentry->d_lock); in dentry_lru_isolate_shrink()
1182 * shrink_dcache_sb - shrink dcache for a superblock
1193 list_lru_walk(&sb->s_dentry_lru, in shrink_dcache_sb()
1196 } while (list_lru_count(&sb->s_dentry_lru) > 0); in shrink_dcache_sb()
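Assembled from the fragments, shrink_dcache_sb() simply drains the per-superblock LRU in batches until it is empty; approximately:

    void shrink_dcache_sb(struct super_block *sb)
    {
            do {
                    LIST_HEAD(dispose);

                    list_lru_walk(&sb->s_dentry_lru,
                                  dentry_lru_isolate_shrink, &dispose, 1024);
                    shrink_dentry_list(&dispose);
            } while (list_lru_count(&sb->s_dentry_lru) > 0);
    }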
1201 * enum d_walk_ret - action to take during tree walk
1215 * d_walk - walk the dentry tree
1216 * @parent: start of walk
1222 static void d_walk(struct dentry *parent, void *data, in d_walk() argument
1232 this_parent = parent; in d_walk()
1233 spin_lock(&this_parent->d_lock); in d_walk()
1250 if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR)) in d_walk()
1253 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); in d_walk()
1260 spin_unlock(&dentry->d_lock); in d_walk()
1266 spin_unlock(&dentry->d_lock); in d_walk()
1270 if (!hlist_empty(&dentry->d_children)) { in d_walk()
1271 spin_unlock(&this_parent->d_lock); in d_walk()
1272 spin_release(&dentry->d_lock.dep_map, _RET_IP_); in d_walk()
1274 spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_); in d_walk()
1277 spin_unlock(&dentry->d_lock); in d_walk()
1284 if (this_parent != parent) { in d_walk()
1286 this_parent = dentry->d_parent; in d_walk()
1288 spin_unlock(&dentry->d_lock); in d_walk()
1289 spin_lock(&this_parent->d_lock); in d_walk()
1291 /* might go back up the wrong parent if we have had a rename. */ in d_walk()
1296 if (likely(!(dentry->d_flags & DCACHE_DENTRY_KILLED))) { in d_walk()
1308 spin_unlock(&this_parent->d_lock); in d_walk()
1313 spin_unlock(&this_parent->d_lock); in d_walk()
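Callers drive d_walk() with a callback returning an enum d_walk_ret (D_WALK_CONTINUE, D_WALK_QUIT, D_WALK_NORETRY or D_WALK_SKIP), invoked with each child's d_lock already held. A hypothetical counting walk, as a usage sketch:

    static enum d_walk_ret count_one(void *data, struct dentry *dentry)
    {
            unsigned long *count = data;

            (*count)++;                     /* runs under dentry->d_lock */
            return D_WALK_CONTINUE;         /* keep descending */
    }

    static unsigned long count_subtree(struct dentry *root)
    {
            unsigned long count = 0;

            d_walk(root, &count, count_one);
            return count;
    }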
1330 struct path path = { .mnt = info->mnt, .dentry = dentry }; in path_check_mount()
1335 info->mounted = 1; in path_check_mount()
1342 * path_has_submounts - check for mounts over a dentry in the
1344 * @parent: path to check.
1346 * Return true if the parent or its subdirectories contain
1349 int path_has_submounts(const struct path *parent) in path_has_submounts() argument
1351 struct check_mount data = { .mnt = parent->mnt, .mounted = 0 }; in path_has_submounts()
1354 d_walk(parent->dentry, &data, path_check_mount); in path_has_submounts()
1372 int ret = -ENOENT; in d_set_mounted()
1374 for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) { in d_set_mounted()
1376 spin_lock(&p->d_lock); in d_set_mounted()
1378 spin_unlock(&p->d_lock); in d_set_mounted()
1381 spin_unlock(&p->d_lock); in d_set_mounted()
1383 spin_lock(&dentry->d_lock); in d_set_mounted()
1385 ret = -EBUSY; in d_set_mounted()
1387 dentry->d_flags |= DCACHE_MOUNTED; in d_set_mounted()
1391 spin_unlock(&dentry->d_lock); in d_set_mounted()
1398 * Search the dentry child list of the specified parent,
1401 * whenever the d_children list is non-empty and continue
1426 if (data->start == dentry) in select_collect()
1429 if (dentry->d_flags & DCACHE_SHRINK_LIST) { in select_collect()
1430 data->found++; in select_collect()
1431 } else if (!dentry->d_lockref.count) { in select_collect()
1432 to_shrink_list(dentry, &data->dispose); in select_collect()
1433 data->found++; in select_collect()
1434 } else if (dentry->d_lockref.count < 0) { in select_collect()
1435 data->found++; in select_collect()
1442 if (!list_empty(&data->dispose)) in select_collect()
1453 if (data->start == dentry) in select_collect2()
1456 if (!dentry->d_lockref.count) { in select_collect2()
1457 if (dentry->d_flags & DCACHE_SHRINK_LIST) { in select_collect2()
1459 data->victim = dentry; in select_collect2()
1462 to_shrink_list(dentry, &data->dispose); in select_collect2()
1469 if (!list_empty(&data->dispose)) in select_collect2()
1476 * shrink_dcache_parent - prune dcache
1477 * @parent: parent of entries to prune
1479 * Prune the dcache to remove unused children of the parent dentry.
1481 void shrink_dcache_parent(struct dentry *parent) in shrink_dcache_parent() argument
1484 struct select_data data = {.start = parent}; in shrink_dcache_parent()
1487 d_walk(parent, &data, select_collect); in shrink_dcache_parent()
1498 d_walk(parent, &data, select_collect2); in shrink_dcache_parent()
1500 spin_lock(&data.victim->d_lock); in shrink_dcache_parent()
1502 spin_unlock(&data.victim->d_lock); in shrink_dcache_parent()
1517 if (!hlist_empty(&dentry->d_children)) in umount_check()
1521 if (dentry == _data && dentry->d_lockref.count == 1) in umount_check()
1527 dentry->d_inode ? in umount_check()
1528 dentry->d_inode->i_ino : 0UL, in umount_check()
1530 dentry->d_lockref.count, in umount_check()
1531 dentry->d_sb->s_type->name, in umount_check()
1532 dentry->d_sb->s_id); in umount_check()
1551 WARN(down_read_trylock(&sb->s_umount), "s_umount should've been locked"); in shrink_dcache_for_umount()
1553 dentry = sb->s_root; in shrink_dcache_for_umount()
1554 sb->s_root = NULL; in shrink_dcache_for_umount()
1557 while (!hlist_bl_empty(&sb->s_roots)) { in shrink_dcache_for_umount()
1558 dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_roots), struct dentry, d_hash)); in shrink_dcache_for_umount()
1574 * d_invalidate - detach submounts, prune dcache, and drop
1580 spin_lock(&dentry->d_lock); in d_invalidate()
1582 spin_unlock(&dentry->d_lock); in d_invalidate()
1586 spin_unlock(&dentry->d_lock); in d_invalidate()
1589 if (!dentry->d_inode) in d_invalidate()
1609 * __d_alloc - allocate a dcache entry
1624 dentry = kmem_cache_alloc_lru(dentry_cache, &sb->s_dentry_lru, in __d_alloc()
1630 * We guarantee that the inline name is always NUL-terminated. in __d_alloc()
1635 dentry->d_iname[DNAME_INLINE_LEN-1] = 0; in __d_alloc()
1638 dname = dentry->d_iname; in __d_alloc()
1639 } else if (name->len > DNAME_INLINE_LEN-1) { in __d_alloc()
1641 struct external_name *p = kmalloc(size + name->len, in __d_alloc()
1648 atomic_set(&p->u.count, 1); in __d_alloc()
1649 dname = p->name; in __d_alloc()
1651 dname = dentry->d_iname; in __d_alloc()
1654 dentry->d_name.len = name->len; in __d_alloc()
1655 dentry->d_name.hash = name->hash; in __d_alloc()
1656 memcpy(dname, name->name, name->len); in __d_alloc()
1657 dname[name->len] = 0; in __d_alloc()
1660 smp_store_release(&dentry->d_name.name, dname); /* ^^^ */ in __d_alloc()
1662 dentry->d_lockref.count = 1; in __d_alloc()
1663 dentry->d_flags = 0; in __d_alloc()
1664 spin_lock_init(&dentry->d_lock); in __d_alloc()
1665 seqcount_spinlock_init(&dentry->d_seq, &dentry->d_lock); in __d_alloc()
1666 dentry->d_inode = NULL; in __d_alloc()
1667 dentry->d_parent = dentry; in __d_alloc()
1668 dentry->d_sb = sb; in __d_alloc()
1669 dentry->d_op = NULL; in __d_alloc()
1670 dentry->d_fsdata = NULL; in __d_alloc()
1671 INIT_HLIST_BL_NODE(&dentry->d_hash); in __d_alloc()
1672 INIT_LIST_HEAD(&dentry->d_lru); in __d_alloc()
1673 INIT_HLIST_HEAD(&dentry->d_children); in __d_alloc()
1674 INIT_HLIST_NODE(&dentry->d_u.d_alias); in __d_alloc()
1675 INIT_HLIST_NODE(&dentry->d_sib); in __d_alloc()
1676 d_set_d_op(dentry, dentry->d_sb->s_d_op); in __d_alloc()
1678 if (dentry->d_op && dentry->d_op->d_init) { in __d_alloc()
1679 err = dentry->d_op->d_init(dentry); in __d_alloc()
1694 * d_alloc - allocate a dcache entry
1695 * @parent: parent of entry to allocate
1702 struct dentry *d_alloc(struct dentry * parent, const struct qstr *name) in d_alloc() argument
1704 struct dentry *dentry = __d_alloc(parent->d_sb, name); in d_alloc()
1707 spin_lock(&parent->d_lock); in d_alloc()
1712 dentry->d_parent = dget_dlock(parent); in d_alloc()
1713 hlist_add_head(&dentry->d_sib, &parent->d_children); in d_alloc()
1714 spin_unlock(&parent->d_lock); in d_alloc()
1726 struct dentry *d_alloc_cursor(struct dentry * parent) in d_alloc_cursor() argument
1728 struct dentry *dentry = d_alloc_anon(parent->d_sb); in d_alloc_cursor()
1730 dentry->d_flags |= DCACHE_DENTRY_CURSOR; in d_alloc_cursor()
1731 dentry->d_parent = dget(parent); in d_alloc_cursor()
1737 * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
1743 * This is used for pipes, sockets et al. - the stuff that should
1758 dentry->d_flags |= DCACHE_NORCU; in d_alloc_pseudo()
1759 if (!sb->s_d_op) in d_alloc_pseudo()
1765 struct dentry *d_alloc_name(struct dentry *parent, const char *name) in d_alloc_name() argument
1770 q.hash_len = hashlen_string(parent, name); in d_alloc_name()
1771 return d_alloc(parent, &q); in d_alloc_name()
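d_alloc_name() is the convenience wrapper for callers holding a plain C string rather than a pre-hashed qstr; it hashes the name against the parent and defers to d_alloc(). Typical use when a filesystem builds an in-memory entry (myfs_make_child() is hypothetical):

    static int myfs_make_child(struct dentry *parent, struct inode *inode)
    {
            struct dentry *child = d_alloc_name(parent, "lost+found");

            if (!child)
                    return -ENOMEM;
            d_add(child, inode);    /* hash it and attach the inode */
            return 0;
    }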
1777 WARN_ON_ONCE(dentry->d_op); in d_set_d_op()
1778 WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH | in d_set_d_op()
1784 dentry->d_op = op; in d_set_d_op()
1787 if (op->d_hash) in d_set_d_op()
1788 dentry->d_flags |= DCACHE_OP_HASH; in d_set_d_op()
1789 if (op->d_compare) in d_set_d_op()
1790 dentry->d_flags |= DCACHE_OP_COMPARE; in d_set_d_op()
1791 if (op->d_revalidate) in d_set_d_op()
1792 dentry->d_flags |= DCACHE_OP_REVALIDATE; in d_set_d_op()
1793 if (op->d_weak_revalidate) in d_set_d_op()
1794 dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE; in d_set_d_op()
1795 if (op->d_delete) in d_set_d_op()
1796 dentry->d_flags |= DCACHE_OP_DELETE; in d_set_d_op()
1797 if (op->d_prune) in d_set_d_op()
1798 dentry->d_flags |= DCACHE_OP_PRUNE; in d_set_d_op()
1799 if (op->d_real) in d_set_d_op()
1800 dentry->d_flags |= DCACHE_OP_REAL; in d_set_d_op()
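d_set_d_op() exists so hot paths can test DCACHE_OP_* bits in d_flags instead of chasing ->d_op pointers. The table itself comes from the filesystem; a minimal hypothetical example wiring up ->d_delete (which retain_dentry() consults via DCACHE_OP_DELETE):

    static int myfs_d_delete(const struct dentry *dentry)
    {
            return 1;       /* hypothetical policy: never keep unused entries */
    }

    static const struct dentry_operations myfs_dentry_ops = {
            .d_delete = myfs_d_delete,
    };

Setting sb->s_d_op before any dentries are created lets __d_alloc() apply the table to every dentry of the superblock, as the fragment above shows.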
1812 if (S_ISDIR(inode->i_mode)) { in d_flags_for_inode()
1814 if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) { in d_flags_for_inode()
1815 if (unlikely(!inode->i_op->lookup)) in d_flags_for_inode()
1818 inode->i_opflags |= IOP_LOOKUP; in d_flags_for_inode()
1823 if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) { in d_flags_for_inode()
1824 if (unlikely(inode->i_op->get_link)) { in d_flags_for_inode()
1828 inode->i_opflags |= IOP_NOFOLLOW; in d_flags_for_inode()
1831 if (unlikely(!S_ISREG(inode->i_mode))) in d_flags_for_inode()
1845 spin_lock(&dentry->d_lock); in __d_instantiate()
1849 if (dentry->d_flags & DCACHE_LRU_LIST) in __d_instantiate()
1851 hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry); in __d_instantiate()
1852 raw_write_seqcount_begin(&dentry->d_seq); in __d_instantiate()
1854 raw_write_seqcount_end(&dentry->d_seq); in __d_instantiate()
1856 spin_unlock(&dentry->d_lock); in __d_instantiate()
1860 * d_instantiate - fill in inode information for a dentry
1876 BUG_ON(!hlist_unhashed(&entry->d_u.d_alias)); in d_instantiate()
1879 spin_lock(&inode->i_lock); in d_instantiate()
1881 spin_unlock(&inode->i_lock); in d_instantiate()
1888 * with lockdep-related part of unlock_new_inode() done before
1889 * anything else. Use that instead of open-coding d_instantiate()/
1894 BUG_ON(!hlist_unhashed(&entry->d_u.d_alias)); in d_instantiate_new()
1898 spin_lock(&inode->i_lock); in d_instantiate_new()
1900 WARN_ON(!(inode->i_state & I_NEW)); in d_instantiate_new()
1901 inode->i_state &= ~I_NEW & ~I_CREATING; in d_instantiate_new()
1903 wake_up_bit(&inode->i_state, __I_NEW); in d_instantiate_new()
1904 spin_unlock(&inode->i_lock); in d_instantiate_new()
1913 res = d_alloc_anon(root_inode->i_sb); in d_make_root()
1929 return ERR_PTR(-ESTALE); in __d_obtain_alias()
1933 sb = inode->i_sb; in __d_obtain_alias()
1941 res = ERR_PTR(-ENOMEM); in __d_obtain_alias()
1946 spin_lock(&inode->i_lock); in __d_obtain_alias()
1954 spin_lock(&new->d_lock); in __d_obtain_alias()
1956 hlist_add_head(&new->d_u.d_alias, &inode->i_dentry); in __d_obtain_alias()
1958 hlist_bl_lock(&sb->s_roots); in __d_obtain_alias()
1959 hlist_bl_add_head(&new->d_hash, &sb->s_roots); in __d_obtain_alias()
1960 hlist_bl_unlock(&sb->s_roots); in __d_obtain_alias()
1962 spin_unlock(&new->d_lock); in __d_obtain_alias()
1963 spin_unlock(&inode->i_lock); in __d_obtain_alias()
1964 inode = NULL; /* consumed by new->d_inode */ in __d_obtain_alias()
1967 spin_unlock(&inode->i_lock); in __d_obtain_alias()
1977 * d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode
1992 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
2001 * d_obtain_root - find or allocate a dentry for a given inode
2013 * replaced by ERR_PTR(-ESTALE).
2022 * d_add_ci - lookup or allocate new dentry with case-exact name
2023 * @inode: the inode case-insensitive lookup has found
2024 * @dentry: the negative dentry that was passed to the parent's lookup func
2025 * @name: the case-exact name to be associated with the returned dentry
2027 * This is to avoid filling the dcache with case-insensitive names to the
2029 * case-insensitive filesystems.
2031 * For a case-insensitive lookup match and if the case-exact dentry
2046 found = d_hash_and_lookup(dentry->d_parent, name); in d_add_ci()
2052 found = d_alloc_parallel(dentry->d_parent, name, in d_add_ci()
2053 dentry->d_wait); in d_add_ci()
2059 found = d_alloc(dentry->d_parent, name); in d_add_ci()
2062 return ERR_PTR(-ENOMEM); in d_add_ci()
2076 * d_same_name - compare dentry name with case-exact name
2077 * @parent: parent dentry
2078 * @dentry: the negative dentry that was passed to the parent's lookup func
2079 * @name: the case-exact name to be associated with the returned dentry
2083 bool d_same_name(const struct dentry *dentry, const struct dentry *parent, in d_same_name() argument
2086 if (likely(!(parent->d_flags & DCACHE_OP_COMPARE))) { in d_same_name()
2087 if (dentry->d_name.len != name->len) in d_same_name()
2089 return dentry_cmp(dentry, name->name, name->len) == 0; in d_same_name()
2091 return parent->d_op->d_compare(dentry, in d_same_name()
2092 dentry->d_name.len, dentry->d_name.name, in d_same_name()
2098 * This is __d_lookup_rcu() when the parent dentry has
2102 const struct dentry *parent, in __d_lookup_rcu_op_compare() argument
2106 u64 hashlen = name->hash_len; in __d_lookup_rcu_op_compare()
2117 seq = raw_seqcount_begin(&dentry->d_seq); in __d_lookup_rcu_op_compare()
2118 if (dentry->d_parent != parent) in __d_lookup_rcu_op_compare()
2122 if (dentry->d_name.hash != hashlen_hash(hashlen)) in __d_lookup_rcu_op_compare()
2124 tlen = dentry->d_name.len; in __d_lookup_rcu_op_compare()
2125 tname = dentry->d_name.name; in __d_lookup_rcu_op_compare()
2127 if (read_seqcount_retry(&dentry->d_seq, seq)) { in __d_lookup_rcu_op_compare()
2131 if (parent->d_op->d_compare(dentry, tlen, tname, name) != 0) in __d_lookup_rcu_op_compare()
2140 * __d_lookup_rcu - search for a dentry (racy, store-free)
2141 * @parent: parent dentry
2146 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
2147 * resolution (store-free path walking) design described in
2148 * Documentation/filesystems/path-lookup.txt.
2152 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
2161 * the returned dentry, so long as its parent's seqlock is checked after the
2168 struct dentry *__d_lookup_rcu(const struct dentry *parent, in __d_lookup_rcu() argument
2172 u64 hashlen = name->hash_len; in __d_lookup_rcu()
2173 const unsigned char *str = name->name; in __d_lookup_rcu()
2185 if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) in __d_lookup_rcu()
2186 return __d_lookup_rcu_op_compare(parent, name, seqp); in __d_lookup_rcu()
2196 * false-negative result. d_lookup() protects against concurrent in __d_lookup_rcu()
2199 * See Documentation/filesystems/path-lookup.txt for more details. in __d_lookup_rcu()
2206 * renames, and thus protects parent and name fields. in __d_lookup_rcu()
2219 * we are still guaranteed NUL-termination of ->d_name.name. in __d_lookup_rcu()
2221 seq = raw_seqcount_begin(&dentry->d_seq); in __d_lookup_rcu()
2222 if (dentry->d_parent != parent) in __d_lookup_rcu()
2226 if (dentry->d_name.hash_len != hashlen) in __d_lookup_rcu()
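Each candidate on the hash chain is validated against a d_seq sample rather than a lock, so a concurrent rename is detected and the entry re-examined instead of serialized against. The per-entry checks line up roughly as:

    seq = raw_seqcount_begin(&dentry->d_seq);
    if (dentry->d_parent != parent)
            continue;
    if (d_unhashed(dentry))
            continue;
    if (dentry->d_name.hash_len != hashlen)
            continue;
    if (dentry_cmp(dentry, str, hashlen_len(hashlen)) != 0)
            continue;
    *seqp = seq;            /* caller re-validates this sample later */
    return dentry;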
2237 * d_lookup - search for a dentry
2238 * @parent: parent dentry
2242 * d_lookup searches the children of the parent dentry for the name in
2247 struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name) in d_lookup() argument
2254 dentry = __d_lookup(parent, name); in d_lookup()
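Completing the fragment, d_lookup() brackets the racy __d_lookup() with the global rename_lock seqlock; only a miss needs to be retried, since a hit was taken under d_lock and is self-validating:

    struct dentry *d_lookup(const struct dentry *parent, const struct qstr *name)
    {
            struct dentry *dentry;
            unsigned seq;

            do {
                    seq = read_seqbegin(&rename_lock);
                    dentry = __d_lookup(parent, name);
                    if (dentry)
                            break;
            } while (read_seqretry(&rename_lock, seq));
            return dentry;
    }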
2263 * __d_lookup - search for a dentry (racy)
2264 * @parent: parent dentry
2269 * false-negative result due to unrelated rename activity.
2277 struct dentry *__d_lookup(const struct dentry *parent, const struct qstr *name) in __d_lookup() argument
2279 unsigned int hash = name->hash; in __d_lookup()
2300 * false-negative result. d_lookup() protects against concurrent in __d_lookup()
2303 * See Documentation/filesystems/path-lookup.txt for more details. in __d_lookup()
2309 if (dentry->d_name.hash != hash) in __d_lookup()
2312 spin_lock(&dentry->d_lock); in __d_lookup()
2313 if (dentry->d_parent != parent) in __d_lookup()
2318 if (!d_same_name(dentry, parent, name)) in __d_lookup()
2321 dentry->d_lockref.count++; in __d_lookup()
2323 spin_unlock(&dentry->d_lock); in __d_lookup()
2326 spin_unlock(&dentry->d_lock); in __d_lookup()
2334 * d_hash_and_lookup - hash the qstr then search for a dentry
2338 * On lookup failure NULL is returned; on bad name - ERR_PTR(-error)
2343 * Check for a fs-specific hash function. Note that we must in d_hash_and_lookup()
2344 * calculate the standard hash first, as the d_op->d_hash() in d_hash_and_lookup()
2347 name->hash = full_name_hash(dir, name->name, name->len); in d_hash_and_lookup()
2348 if (dir->d_flags & DCACHE_OP_HASH) { in d_hash_and_lookup()
2349 int err = dir->d_op->d_hash(dir, name); in d_hash_and_lookup()
2359 * - turn this dentry into a negative dentry
2360 * - unhash this dentry and free it.
2371 * d_delete - delete a dentry
2380 struct inode *inode = dentry->d_inode; in d_delete()
2382 spin_lock(&inode->i_lock); in d_delete()
2383 spin_lock(&dentry->d_lock); in d_delete()
2387 if (dentry->d_lockref.count == 1) { in d_delete()
2388 dentry->d_flags &= ~DCACHE_CANT_MOUNT; in d_delete()
2392 spin_unlock(&dentry->d_lock); in d_delete()
2393 spin_unlock(&inode->i_lock); in d_delete()
2400 struct hlist_bl_head *b = d_hash(entry->d_name.hash); in __d_rehash()
2403 hlist_bl_add_head_rcu(&entry->d_hash, b); in __d_rehash()
2408 * d_rehash - add an entry back to the hash
2416 spin_lock(&entry->d_lock); in d_rehash()
2418 spin_unlock(&entry->d_lock); in d_rehash()
2426 unsigned n = dir->i_dir_seq; in start_dir_add()
2427 if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n) in start_dir_add()
2436 smp_store_release(&dir->i_dir_seq, n + 2); in end_dir_add()
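start_dir_add()/end_dir_add() implement a tiny sequence count in ->i_dir_seq: an odd value means a directory insertion is in flight. The writer flips even to odd with cmpxchg (spinning if someone else got there first) and releases by publishing the next even value; readers (see d_alloc_parallel() below) sample it with smp_load_acquire() and retry on change. A distilled model of the writer side:

    unsigned n;

    /* even -> odd: claim the in-flight state */
    for (;;) {
            n = dir->i_dir_seq;
            if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n)
                    break;
            cpu_relax();
    }
    /* ... hash the in-lookup dentry ... */
    smp_store_release(&dir->i_dir_seq, n + 2);      /* next even value */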
2445 add_wait_queue(dentry->d_wait, &wait); in d_wait_lookup()
2448 spin_unlock(&dentry->d_lock); in d_wait_lookup()
2450 spin_lock(&dentry->d_lock); in d_wait_lookup()
2455 struct dentry *d_alloc_parallel(struct dentry *parent, in d_alloc_parallel() argument
2459 unsigned int hash = name->hash; in d_alloc_parallel()
2460 struct hlist_bl_head *b = in_lookup_hash(parent, hash); in d_alloc_parallel()
2462 struct dentry *new = d_alloc(parent, name); in d_alloc_parallel()
2467 return ERR_PTR(-ENOMEM); in d_alloc_parallel()
2471 seq = smp_load_acquire(&parent->d_inode->i_dir_seq); in d_alloc_parallel()
2473 dentry = __d_lookup_rcu(parent, name, &d_seq); in d_alloc_parallel()
2475 if (!lockref_get_not_dead(&dentry->d_lockref)) { in d_alloc_parallel()
2479 if (read_seqcount_retry(&dentry->d_seq, d_seq)) { in d_alloc_parallel()
2499 if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) { in d_alloc_parallel()
2505 * No changes for the parent since the beginning of d_lookup(). in d_alloc_parallel()
2507 * any potential in-lookup matches are going to stay here until in d_alloc_parallel()
2512 if (dentry->d_name.hash != hash) in d_alloc_parallel()
2514 if (dentry->d_parent != parent) in d_alloc_parallel()
2516 if (!d_same_name(dentry, parent, name)) in d_alloc_parallel()
2520 if (!lockref_get_not_dead(&dentry->d_lockref)) { in d_alloc_parallel()
2530 spin_lock(&dentry->d_lock); in d_alloc_parallel()
2533 * it's not in-lookup anymore; in principle we should repeat in d_alloc_parallel()
2538 if (unlikely(dentry->d_name.hash != hash)) in d_alloc_parallel()
2540 if (unlikely(dentry->d_parent != parent)) in d_alloc_parallel()
2544 if (unlikely(!d_same_name(dentry, parent, name))) in d_alloc_parallel()
2547 spin_unlock(&dentry->d_lock); in d_alloc_parallel()
2552 /* we can't take ->d_lock here; it's OK, though. */ in d_alloc_parallel()
2553 new->d_flags |= DCACHE_PAR_LOOKUP; in d_alloc_parallel()
2554 new->d_wait = wq; in d_alloc_parallel()
2555 hlist_bl_add_head(&new->d_u.d_in_lookup_hash, b); in d_alloc_parallel()
2559 spin_unlock(&dentry->d_lock); in d_alloc_parallel()
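The caller-side contract: the result is either someone else's matching dentry (possibly still in-lookup, to be waited on), or the caller's own new in-lookup dentry, which it must resolve and take out of in-lookup state. A sketch, with myfs_iget() hypothetical:

    static struct dentry *myfs_lookup_one(struct dentry *parent,
                                          const struct qstr *name)
    {
            DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
            struct dentry *res = d_alloc_parallel(parent, name, &wq);

            if (IS_ERR(res) || !d_in_lookup(res))
                    return res;     /* error, or another lookup won: reuse */

            /* we own the in-lookup dentry: resolve and splice the result */
            return d_splice_alias(myfs_iget(parent, name), res);
    }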
2566 * - Unhash the dentry
2567 * - Retrieve and clear the waitqueue head in dentry
2568 * - Return the waitqueue head
2575 lockdep_assert_held(&dentry->d_lock); in __d_lookup_unhash()
2577 b = in_lookup_hash(dentry->d_parent, dentry->d_name.hash); in __d_lookup_unhash()
2579 dentry->d_flags &= ~DCACHE_PAR_LOOKUP; in __d_lookup_unhash()
2580 __hlist_bl_del(&dentry->d_u.d_in_lookup_hash); in __d_lookup_unhash()
2581 d_wait = dentry->d_wait; in __d_lookup_unhash()
2582 dentry->d_wait = NULL; in __d_lookup_unhash()
2584 INIT_HLIST_NODE(&dentry->d_u.d_alias); in __d_lookup_unhash()
2585 INIT_LIST_HEAD(&dentry->d_lru); in __d_lookup_unhash()
2591 spin_lock(&dentry->d_lock); in __d_lookup_unhash_wake()
2593 spin_unlock(&dentry->d_lock); in __d_lookup_unhash_wake()
2597 /* inode->i_lock held if inode is non-NULL */
2604 spin_lock(&dentry->d_lock); in __d_add()
2606 dir = dentry->d_parent->d_inode; in __d_add()
2612 hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry); in __d_add()
2613 raw_write_seqcount_begin(&dentry->d_seq); in __d_add()
2615 raw_write_seqcount_end(&dentry->d_seq); in __d_add()
2621 spin_unlock(&dentry->d_lock); in __d_add()
2623 spin_unlock(&inode->i_lock); in __d_add()
2627 * d_add - add dentry to hash queues
2639 spin_lock(&inode->i_lock); in d_add()
2646 * d_exact_alias - find and hash an exact unhashed alias
2650 * If an unhashed dentry with the same name/parent and desired
2654 * Parent directory should be locked.
2659 unsigned int hash = entry->d_name.hash; in d_exact_alias()
2661 spin_lock(&inode->i_lock); in d_exact_alias()
2662 hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) { in d_exact_alias()
2664 * Don't need alias->d_lock here, because aliases with in d_exact_alias()
2665 * d_parent == entry->d_parent are not subject to name or in d_exact_alias()
2666 * parent changes, because the parent inode i_mutex is held. in d_exact_alias()
2668 if (alias->d_name.hash != hash) in d_exact_alias()
2670 if (alias->d_parent != entry->d_parent) in d_exact_alias()
2672 if (!d_same_name(alias, entry->d_parent, &entry->d_name)) in d_exact_alias()
2674 spin_lock(&alias->d_lock); in d_exact_alias()
2676 spin_unlock(&alias->d_lock); in d_exact_alias()
2681 spin_unlock(&alias->d_lock); in d_exact_alias()
2683 spin_unlock(&inode->i_lock); in d_exact_alias()
2686 spin_unlock(&inode->i_lock); in d_exact_alias()
2698 swap(target->d_name.name, dentry->d_name.name); in swap_names()
2704 memcpy(target->d_iname, dentry->d_name.name, in swap_names()
2705 dentry->d_name.len + 1); in swap_names()
2706 dentry->d_name.name = target->d_name.name; in swap_names()
2707 target->d_name.name = target->d_iname; in swap_names()
2715 memcpy(dentry->d_iname, target->d_name.name, in swap_names()
2716 target->d_name.len + 1); in swap_names()
2717 target->d_name.name = dentry->d_name.name; in swap_names()
2718 dentry->d_name.name = dentry->d_iname; in swap_names()
2726 swap(((long *) &dentry->d_iname)[i], in swap_names()
2727 ((long *) &target->d_iname)[i]); in swap_names()
2731 swap(dentry->d_name.hash_len, target->d_name.hash_len); in swap_names()
2740 atomic_inc(&external_name(target)->u.count); in copy_name()
2741 dentry->d_name = target->d_name; in copy_name()
2743 memcpy(dentry->d_iname, target->d_name.name, in copy_name()
2744 target->d_name.len + 1); in copy_name()
2745 dentry->d_name.name = dentry->d_iname; in copy_name()
2746 dentry->d_name.hash_len = target->d_name.hash_len; in copy_name()
2748 if (old_name && likely(atomic_dec_and_test(&old_name->u.count))) in copy_name()
2753 * __d_move - move a dentry
2761 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
2771 WARN_ON(!dentry->d_inode); in __d_move()
2776 old_parent = dentry->d_parent; in __d_move()
2780 spin_lock(&target->d_parent->d_lock); in __d_move()
2782 /* target is not a descendent of dentry->d_parent */ in __d_move()
2783 spin_lock(&target->d_parent->d_lock); in __d_move()
2784 spin_lock_nested(&old_parent->d_lock, DENTRY_D_LOCK_NESTED); in __d_move()
2787 spin_lock(&old_parent->d_lock); in __d_move()
2789 spin_lock_nested(&target->d_parent->d_lock, in __d_move()
2792 spin_lock_nested(&dentry->d_lock, 2); in __d_move()
2793 spin_lock_nested(&target->d_lock, 3); in __d_move()
2796 dir = target->d_parent->d_inode; in __d_move()
2801 write_seqcount_begin(&dentry->d_seq); in __d_move()
2802 write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED); in __d_move()
2811 dentry->d_parent = target->d_parent; in __d_move()
2814 target->d_hash.pprev = NULL; in __d_move()
2815 dentry->d_parent->d_lockref.count++; in __d_move()
2817 WARN_ON(!--old_parent->d_lockref.count); in __d_move()
2819 target->d_parent = old_parent; in __d_move()
2821 if (!hlist_unhashed(&target->d_sib)) in __d_move()
2822 __hlist_del(&target->d_sib); in __d_move()
2823 hlist_add_head(&target->d_sib, &target->d_parent->d_children); in __d_move()
2827 if (!hlist_unhashed(&dentry->d_sib)) in __d_move()
2828 __hlist_del(&dentry->d_sib); in __d_move()
2829 hlist_add_head(&dentry->d_sib, &dentry->d_parent->d_children); in __d_move()
2834 write_seqcount_end(&target->d_seq); in __d_move()
2835 write_seqcount_end(&dentry->d_seq); in __d_move()
2840 if (dentry->d_parent != old_parent) in __d_move()
2841 spin_unlock(&dentry->d_parent->d_lock); in __d_move()
2843 spin_unlock(&old_parent->d_lock); in __d_move()
2844 spin_unlock(&target->d_lock); in __d_move()
2845 spin_unlock(&dentry->d_lock); in __d_move()
2849 * d_move - move a dentry
2866 * d_exchange - exchange two dentries
2874 WARN_ON(!dentry1->d_inode); in d_exchange()
2875 WARN_ON(!dentry2->d_inode); in d_exchange()
2885 * d_ancestor - search for an ancestor
2896 for (p = p2; !IS_ROOT(p); p = p->d_parent) { in d_ancestor()
2897 if (p->d_parent == p1) in d_ancestor()
2907 * dentry->d_parent->d_inode->i_mutex, and rename_lock
2916 int ret = -ESTALE; in __d_unalias()
2918 /* If alias and dentry share a parent, then no extra locks required */ in __d_unalias()
2919 if (alias->d_parent == dentry->d_parent) in __d_unalias()
2923 if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex)) in __d_unalias()
2925 m1 = &dentry->d_sb->s_vfs_rename_mutex; in __d_unalias()
2926 if (!inode_trylock_shared(alias->d_parent->d_inode)) in __d_unalias()
2928 m2 = &alias->d_parent->d_inode->i_rwsem; in __d_unalias()
2941 * d_splice_alias - splice a disconnected dentry into the tree if one exists
2949 * If a non-IS_ROOT directory is found, the filesystem is corrupt, and
2956 * is returned. This matches the expected return value of ->lookup.
2974 spin_lock(&inode->i_lock); in d_splice_alias()
2975 if (S_ISDIR(inode->i_mode)) { in d_splice_alias()
2979 spin_unlock(&inode->i_lock); in d_splice_alias()
2984 new = ERR_PTR(-ELOOP); in d_splice_alias()
2988 dentry->d_name.name, in d_splice_alias()
2989 inode->i_sb->s_type->name, in d_splice_alias()
2990 inode->i_sb->s_id); in d_splice_alias()
2992 struct dentry *old_parent = dget(new->d_parent); in d_splice_alias()
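The canonical consumer of d_splice_alias() is a disk filesystem's ->lookup(): resolve the name to an inode (or NULL for a negative entry) and let d_splice_alias() handle hashing, aliasing and the directory-alias move dealt with above. A sketch, with the myfs_* names hypothetical:

    static struct dentry *myfs_lookup(struct inode *dir, struct dentry *dentry,
                                      unsigned int flags)
    {
            struct inode *inode = NULL;
            u64 ino = myfs_find_entry(dir, &dentry->d_name);  /* hypothetical */

            if (ino)
                    inode = myfs_iget(dir->i_sb, ino);        /* hypothetical */
            return d_splice_alias(inode, dentry); /* NULL -> negative dentry */
    }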
3021 * is_subdir - is new dentry a subdirectory of old_dentry
3025 * Returns true if new_dentry is a subdirectory of the parent (at any depth).
3061 if (d_unhashed(dentry) || !dentry->d_inode) in d_genocide_kill()
3064 if (!(dentry->d_flags & DCACHE_GENOCIDE)) { in d_genocide_kill()
3065 dentry->d_flags |= DCACHE_GENOCIDE; in d_genocide_kill()
3066 dentry->d_lockref.count--; in d_genocide_kill()
3072 void d_genocide(struct dentry *parent) in d_genocide() argument
3074 d_walk(parent, parent, d_genocide_kill); in d_genocide()
3079 struct dentry *dentry = file->f_path.dentry; in d_mark_tmpfile()
3081 BUG_ON(dentry->d_name.name != dentry->d_iname || in d_mark_tmpfile()
3082 !hlist_unhashed(&dentry->d_u.d_alias) || in d_mark_tmpfile()
3084 spin_lock(&dentry->d_parent->d_lock); in d_mark_tmpfile()
3085 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); in d_mark_tmpfile()
3086 dentry->d_name.len = sprintf(dentry->d_iname, "#%llu", in d_mark_tmpfile()
3087 (unsigned long long)inode->i_ino); in d_mark_tmpfile()
3088 spin_unlock(&dentry->d_lock); in d_mark_tmpfile()
3089 spin_unlock(&dentry->d_parent->d_lock); in d_mark_tmpfile()
3095 struct dentry *dentry = file->f_path.dentry; in d_tmpfile()
3131 d_hash_shift = 32 - d_hash_shift; in dcache_init_early()
3159 d_hash_shift = 32 - d_hash_shift; in dcache_init()
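Storing '32 - log2(buckets)' in d_hash_shift lets the hot path index the table with a single right shift of the 32-bit name hash, consuming its high bits; the companion helper used by ___d_drop() above is simply:

    static inline struct hlist_bl_head *d_hash(unsigned int hash)
    {
            return dentry_hashtable + (hash >> d_hash_shift);
    }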