Lines matching "de", "-", "serialized" in fs/dcache.c

1 // SPDX-License-Identifier: GPL-2.0-only
6 * (C) 1997 Thomas Schoebel-Theuer,
13 * The dcache is a master of the icache - whenever a dcache entry
38 #include <asm/runtime-const.h>
42 * dcache->d_inode->i_lock protects:
43 * - i_dentry, d_u.d_alias, d_inode of aliases
45 * - the dcache hash table
47 * - the s_roots list (see __d_drop)
48 * dentry->d_sb->s_dentry_lru_lock protects:
49 * - the dcache lru lists and counters
51 * - d_flags
52 * - d_name
53 * - d_lru
54 * - d_count
55 * - d_unhashed()
56 * - d_parent and d_children
57 * - children's d_sib and d_parent
58 * - d_u.d_alias, d_inode
61 * dentry->d_inode->i_lock
62 * dentry->d_lock
63 * dentry->d_sb->s_dentry_lru_lock
68 * dentry->d_parent->...->d_parent->d_lock
70 * dentry->d_parent->d_lock
71 * dentry->d_lock
74 * arbitrary, since it's serialized on rename_lock
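
The rules above compose: when both an inode and a parent/child dentry pair are involved, i_lock comes first, then the parent's d_lock, then the child's with the nested annotation. A minimal sketch, not taken from dcache.c, assuming the dentry is pinned and is not a root:

	static void lock_child_for_update(struct dentry *dentry)
	{
		struct inode *inode = dentry->d_inode;

		spin_lock(&inode->i_lock);		/* ranks above d_lock */
		spin_lock(&dentry->d_parent->d_lock);	/* parent before child */
		spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
		/* ... modify d_flags / d_name / aliases here ... */
		spin_unlock(&dentry->d_lock);
		spin_unlock(&dentry->d_parent->d_lock);
		spin_unlock(&inode->i_lock);
	}
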
95 * to make this good - I've just made it work.
97 * This hash-function tries to avoid losing too many bits of hash
98 * information, yet avoid using a prime hash-size or similar.
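
The bucket selection this feeds is just "use the top bits of the hash"; a simplified sketch of the d_hash() helper (the file itself wraps the table pointer and shift in runtime-const accessors, as the asm/runtime-const.h include above suggests):

	static inline struct hlist_bl_head *d_hash(unsigned int hash)
	{
		/* d_hash_shift is stored as 32 - log2(number of buckets) */
		return dentry_hashtable + (hash >> d_hash_shift);
	}
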
146 * Here we resort to our own counters instead of using generic per-cpu counters
196 .procname = "dentry-state",
213 * Compare 2 name strings, return 0 if they match, otherwise non-zero.
214 * The strings are both count bytes long, and count is non-zero.
218 #include <asm/word-at-a-time.h>
241 tcount -= sizeof(unsigned long); in dentry_string_cmp()
258 tcount--; in dentry_string_cmp()
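
For contrast with the word-at-a-time loop above, the generic byte-at-a-time fallback has this shape (a sketch of the !CONFIG_DCACHE_WORD_ACCESS variant):

	static inline int dentry_string_cmp(const unsigned char *cs,
					    const unsigned char *ct, unsigned tcount)
	{
		do {
			if (*cs != *ct)
				return 1;	/* mismatch: non-zero */
			cs++;
			ct++;
			tcount--;
		} while (tcount);
		return 0;			/* all tcount bytes matched */
	}
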
283 const unsigned char *cs = READ_ONCE(dentry->d_name.name); in dentry_cmp()
298 return container_of(dentry->d_name.name, struct external_name, name[0]); in external_name()
317 return dentry->d_name.name != dentry->d_iname; in dname_external()
322 spin_lock(&dentry->d_lock); in take_dentry_name_snapshot()
323 name->name = dentry->d_name; in take_dentry_name_snapshot()
325 atomic_inc(&external_name(dentry)->u.count); in take_dentry_name_snapshot()
327 memcpy(name->inline_name, dentry->d_iname, in take_dentry_name_snapshot()
328 dentry->d_name.len + 1); in take_dentry_name_snapshot()
329 name->name.name = name->inline_name; in take_dentry_name_snapshot()
331 spin_unlock(&dentry->d_lock); in take_dentry_name_snapshot()
337 if (unlikely(name->name.name != name->inline_name)) { in release_dentry_name_snapshot()
339 p = container_of(name->name.name, struct external_name, name[0]); in release_dentry_name_snapshot()
340 if (unlikely(atomic_dec_and_test(&p->u.count))) in release_dentry_name_snapshot()
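
A hedged usage sketch: the snapshot either bumps the external name's refcount or copies the inline name, so the caller may drop all locks and still read it safely:

	struct name_snapshot snap;

	take_dentry_name_snapshot(&snap, dentry);
	/* valid even if the dentry is renamed concurrently */
	pr_info("processing %s\n", snap.name.name);
	release_dentry_name_snapshot(&snap);
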
352 dentry->d_inode = inode; in __d_set_inode_and_type()
353 flags = READ_ONCE(dentry->d_flags); in __d_set_inode_and_type()
356 smp_store_release(&dentry->d_flags, flags); in __d_set_inode_and_type()
361 unsigned flags = READ_ONCE(dentry->d_flags); in __d_clear_type_and_inode()
364 WRITE_ONCE(dentry->d_flags, flags); in __d_clear_type_and_inode()
365 dentry->d_inode = NULL; in __d_clear_type_and_inode()
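
The store-release above publishes ->d_inode before the type bits in ->d_flags; a reader that loads the flags with acquire semantics can then trust the inode pointer. A sketch of that pairing (illustrative only; the real type helpers live in include/linux/dcache.h):

	static inline bool dentry_is_positive_dir(const struct dentry *dentry)
	{
		/* acquire pairs with smp_store_release() in __d_set_inode_and_type() */
		unsigned flags = smp_load_acquire(&dentry->d_flags);

		return (flags & DCACHE_ENTRY_TYPE) == DCACHE_DIRECTORY_TYPE;
	}
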
376 WARN_ON(!hlist_unhashed(&dentry->d_u.d_alias)); in dentry_free()
379 if (likely(atomic_dec_and_test(&p->u.count))) { in dentry_free()
380 call_rcu(&dentry->d_u.d_rcu, __d_free_external); in dentry_free()
385 if (dentry->d_flags & DCACHE_NORCU) in dentry_free()
386 __d_free(&dentry->d_u.d_rcu); in dentry_free()
388 call_rcu(&dentry->d_u.d_rcu, __d_free); in dentry_free()
396 __releases(dentry->d_lock) in dentry_unlink_inode()
397 __releases(dentry->d_inode->i_lock) in dentry_unlink_inode()
399 struct inode *inode = dentry->d_inode; in dentry_unlink_inode()
401 raw_write_seqcount_begin(&dentry->d_seq); in dentry_unlink_inode()
403 hlist_del_init(&dentry->d_u.d_alias); in dentry_unlink_inode()
404 raw_write_seqcount_end(&dentry->d_seq); in dentry_unlink_inode()
405 spin_unlock(&dentry->d_lock); in dentry_unlink_inode()
406 spin_unlock(&inode->i_lock); in dentry_unlink_inode()
407 if (!inode->i_nlink) in dentry_unlink_inode()
409 if (dentry->d_op && dentry->d_op->d_iput) in dentry_unlink_inode()
410 dentry->d_op->d_iput(dentry, inode); in dentry_unlink_inode()
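
As the tail of dentry_unlink_inode() shows, ->d_iput() replaces the default iput() call, so an override must drop the inode reference itself. A hypothetical example (the myfs_* names are made up):

	static void myfs_d_iput(struct dentry *dentry, struct inode *inode)
	{
		myfs_detach_private(dentry);	/* hypothetical per-dentry teardown */
		iput(inode);			/* mandatory: drop the reference */
	}
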
417 * is in use - which includes both the "real" per-superblock
423 * The per-cpu "nr_dentry_unused" counters are updated with
426 * The per-cpu "nr_dentry_negative" counters are only updated
427 * when deleted from or added to the per-superblock LRU list, not
434 #define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x))
438 dentry->d_flags |= DCACHE_LRU_LIST; in d_lru_add()
443 &dentry->d_sb->s_dentry_lru, &dentry->d_lru)); in d_lru_add()
449 dentry->d_flags &= ~DCACHE_LRU_LIST; in d_lru_del()
454 &dentry->d_sb->s_dentry_lru, &dentry->d_lru)); in d_lru_del()
460 list_del_init(&dentry->d_lru); in d_shrink_del()
461 dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST); in d_shrink_del()
468 list_add(&dentry->d_lru, list); in d_shrink_add()
469 dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST; in d_shrink_add()
482 dentry->d_flags &= ~DCACHE_LRU_LIST; in d_lru_isolate()
486 list_lru_isolate(lru, &dentry->d_lru); in d_lru_isolate()
493 dentry->d_flags |= DCACHE_SHRINK_LIST; in d_lru_shrink_move()
496 list_lru_isolate_move(lru, &dentry->d_lru, list); in d_lru_shrink_move()
508 b = &dentry->d_sb->s_roots; in ___d_drop()
510 b = d_hash(dentry->d_name.hash); in ___d_drop()
513 __hlist_bl_del(&dentry->d_hash); in ___d_drop()
521 dentry->d_hash.pprev = NULL; in __d_drop()
522 write_seqcount_invalidate(&dentry->d_seq); in __d_drop()
528 * d_drop - drop a dentry
533 * deleting the dentry - d_delete will try to mark the dentry negative if
540 * __d_drop requires dentry->d_lock
543 * (dentry->d_hash.pprev will be LIST_POISON2, not NULL).
547 spin_lock(&dentry->d_lock); in d_drop()
549 spin_unlock(&dentry->d_lock); in d_drop()
560 dentry->d_flags |= DCACHE_DENTRY_KILLED; in dentry_unlist()
561 if (unlikely(hlist_unhashed(&dentry->d_sib))) in dentry_unlist()
563 __hlist_del(&dentry->d_sib); in dentry_unlist()
566 * a normal list member, it didn't matter - ->d_sib.next would've in dentry_unlist()
569 * Normally d_walk() doesn't care about cursors moving around - in dentry_unlist()
570 * ->d_lock on parent prevents that and since a cursor has no children in dentry_unlist()
572 * There is one exception, though - if we ascend from a child that in dentry_unlist()
574 * using the value left in its ->d_sib.next. And if _that_ in dentry_unlist()
576 * before d_walk() regains parent->d_lock, we'll end up skipping in dentry_unlist()
579 * Solution: make sure that the pointer left behind in ->d_sib.next in dentry_unlist()
583 while (dentry->d_sib.next) { in dentry_unlist()
584 next = hlist_entry(dentry->d_sib.next, struct dentry, d_sib); in dentry_unlist()
585 if (likely(!(next->d_flags & DCACHE_DENTRY_CURSOR))) in dentry_unlist()
587 dentry->d_sib.next = next->d_sib.next; in dentry_unlist()
599 lockref_mark_dead(&dentry->d_lockref); in __dentry_kill()
605 if (dentry->d_flags & DCACHE_OP_PRUNE) in __dentry_kill()
606 dentry->d_op->d_prune(dentry); in __dentry_kill()
608 if (dentry->d_flags & DCACHE_LRU_LIST) { in __dentry_kill()
609 if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) in __dentry_kill()
614 if (dentry->d_inode) in __dentry_kill()
617 spin_unlock(&dentry->d_lock); in __dentry_kill()
619 if (dentry->d_op && dentry->d_op->d_release) in __dentry_kill()
620 dentry->d_op->d_release(dentry); in __dentry_kill()
623 /* now that it's negative, ->d_parent is stable */ in __dentry_kill()
625 parent = dentry->d_parent; in __dentry_kill()
626 spin_lock(&parent->d_lock); in __dentry_kill()
628 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); in __dentry_kill()
630 if (dentry->d_flags & DCACHE_SHRINK_LIST) in __dentry_kill()
632 spin_unlock(&dentry->d_lock); in __dentry_kill()
635 if (parent && --parent->d_lockref.count) { in __dentry_kill()
636 spin_unlock(&parent->d_lock); in __dentry_kill()
644 * Called under rcu_read_lock() and dentry->d_lock; the former
655 struct inode *inode = dentry->d_inode; in lock_for_kill()
657 if (unlikely(dentry->d_lockref.count)) in lock_for_kill()
660 if (!inode || likely(spin_trylock(&inode->i_lock))) in lock_for_kill()
664 spin_unlock(&dentry->d_lock); in lock_for_kill()
665 spin_lock(&inode->i_lock); in lock_for_kill()
666 spin_lock(&dentry->d_lock); in lock_for_kill()
667 if (likely(inode == dentry->d_inode)) in lock_for_kill()
669 spin_unlock(&inode->i_lock); in lock_for_kill()
670 inode = dentry->d_inode; in lock_for_kill()
672 if (likely(!dentry->d_lockref.count)) in lock_for_kill()
675 spin_unlock(&inode->i_lock); in lock_for_kill()
687 * re-gotten a reference to the dentry and change that, but our work is done -
695 d_flags = READ_ONCE(dentry->d_flags); in retain_dentry()
705 // ->d_delete() might tell us not to bother, but that requires in retain_dentry()
706 // ->d_lock; can't decide without it in retain_dentry()
708 if (!locked || dentry->d_op->d_delete(dentry)) in retain_dentry()
717 // need to do something - put it on LRU if it wasn't there already in retain_dentry()
719 // Unfortunately, both actions require ->d_lock, so in lockless in retain_dentry()
728 dentry->d_flags |= DCACHE_REFERENCED; in retain_dentry()
735 struct dentry *de; in d_mark_dontcache() local
737 spin_lock(&inode->i_lock); in d_mark_dontcache()
738 hlist_for_each_entry(de, &inode->i_dentry, d_u.d_alias) { in d_mark_dontcache()
739 spin_lock(&de->d_lock); in d_mark_dontcache()
740 de->d_flags |= DCACHE_DONTCACHE; in d_mark_dontcache()
741 spin_unlock(&de->d_lock); in d_mark_dontcache()
743 inode->i_state |= I_DONTCACHE; in d_mark_dontcache()
744 spin_unlock(&inode->i_lock); in d_mark_dontcache()
765 ret = lockref_put_return(&dentry->d_lockref); in fast_dput()
773 spin_lock(&dentry->d_lock); in fast_dput()
774 if (WARN_ON_ONCE(dentry->d_lockref.count <= 0)) { in fast_dput()
775 spin_unlock(&dentry->d_lock); in fast_dput()
778 dentry->d_lockref.count--; in fast_dput()
790 * taking the lock? There's a very common case when it's all we need - in fast_dput()
800 * but we'll need to re-check the situation after getting the lock. in fast_dput()
802 spin_lock(&dentry->d_lock); in fast_dput()
811 if (dentry->d_lockref.count || retain_dentry(dentry, true)) { in fast_dput()
812 spin_unlock(&dentry->d_lock); in fast_dput()
837 * dput - release a dentry
861 spin_unlock(&dentry->d_lock); in dput()
867 spin_unlock(&dentry->d_lock); in dput()
872 __must_hold(&dentry->d_lock) in to_shrink_list()
874 if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) { in to_shrink_list()
875 if (dentry->d_flags & DCACHE_LRU_LIST) in to_shrink_list()
890 spin_unlock(&dentry->d_lock); in dput_to_list()
904 seq = raw_seqcount_begin(&dentry->d_seq); in dget_parent()
905 ret = READ_ONCE(dentry->d_parent); in dget_parent()
906 gotref = lockref_get_not_zero(&ret->d_lockref); in dget_parent()
909 if (!read_seqcount_retry(&dentry->d_seq, seq)) in dget_parent()
916 * Don't need rcu_dereference because we re-check it was correct under in dget_parent()
920 ret = dentry->d_parent; in dget_parent()
921 spin_lock(&ret->d_lock); in dget_parent()
922 if (unlikely(ret != dentry->d_parent)) { in dget_parent()
923 spin_unlock(&ret->d_lock); in dget_parent()
928 BUG_ON(!ret->d_lockref.count); in dget_parent()
929 ret->d_lockref.count++; in dget_parent()
930 spin_unlock(&ret->d_lock); in dget_parent()
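
Callers get a referenced, stable parent without holding any d_lock themselves; a usage sketch:

	struct dentry *parent = dget_parent(dentry);

	/* parent is pinned and cannot change identity under us */
	pr_info("parent of %pd is %pd\n", dentry, parent);
	dput(parent);
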
939 if (hlist_empty(&inode->i_dentry)) in __d_find_any_alias()
941 alias = hlist_entry(inode->i_dentry.first, struct dentry, d_u.d_alias); in __d_find_any_alias()
942 lockref_get(&alias->d_lockref); in __d_find_any_alias()
947 * d_find_any_alias - find any alias for a given inode
955 struct dentry *de; in d_find_any_alias() local
957 spin_lock(&inode->i_lock); in d_find_any_alias()
958 de = __d_find_any_alias(inode); in d_find_any_alias()
959 spin_unlock(&inode->i_lock); in d_find_any_alias()
960 return de; in d_find_any_alias()
968 if (S_ISDIR(inode->i_mode)) in __d_find_alias()
971 hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) { in __d_find_alias()
972 spin_lock(&alias->d_lock); in __d_find_alias()
975 spin_unlock(&alias->d_lock); in __d_find_alias()
978 spin_unlock(&alias->d_lock); in __d_find_alias()
984 * d_find_alias - grab a hashed alias of inode
999 struct dentry *de = NULL; in d_find_alias() local
1001 if (!hlist_empty(&inode->i_dentry)) { in d_find_alias()
1002 spin_lock(&inode->i_lock); in d_find_alias()
1003 de = __d_find_alias(inode); in d_find_alias()
1004 spin_unlock(&inode->i_lock); in d_find_alias()
1006 return de; in d_find_alias()
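
A hedged usage sketch; the returned alias carries a reference the caller must drop:

	struct dentry *alias = d_find_alias(inode);

	if (alias) {
		pr_info("inode %lu is reachable as %pd\n", inode->i_ino, alias);
		dput(alias);
	}
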
1016 struct hlist_head *l = &inode->i_dentry; in d_find_alias_rcu()
1017 struct dentry *de = NULL; in d_find_alias_rcu() local
1019 spin_lock(&inode->i_lock); in d_find_alias_rcu()
1020 // ->i_dentry and ->i_rcu are colocated, but the latter won't be in d_find_alias_rcu()
1022 if (likely(!(inode->i_state & I_FREEING) && !hlist_empty(l))) { in d_find_alias_rcu()
1023 if (S_ISDIR(inode->i_mode)) { in d_find_alias_rcu()
1024 de = hlist_entry(l->first, struct dentry, d_u.d_alias); in d_find_alias_rcu()
1026 hlist_for_each_entry(de, l, d_u.d_alias) in d_find_alias_rcu()
1027 if (!d_unhashed(de)) in d_find_alias_rcu()
1031 spin_unlock(&inode->i_lock); in d_find_alias_rcu()
1032 return de; in d_find_alias_rcu()
1044 spin_lock(&inode->i_lock); in d_prune_aliases()
1045 hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) { in d_prune_aliases()
1046 spin_lock(&dentry->d_lock); in d_prune_aliases()
1047 if (!dentry->d_lockref.count) in d_prune_aliases()
1049 spin_unlock(&dentry->d_lock); in d_prune_aliases()
1051 spin_unlock(&inode->i_lock); in d_prune_aliases()
1065 spin_unlock(&victim->d_lock); in shrink_kill()
1073 dentry = list_entry(list->prev, struct dentry, d_lru); in shrink_dentry_list()
1074 spin_lock(&dentry->d_lock); in shrink_dentry_list()
1080 can_free = dentry->d_flags & DCACHE_DENTRY_KILLED; in shrink_dentry_list()
1081 spin_unlock(&dentry->d_lock); in shrink_dentry_list()
1099 * we are inverting the lru lock/dentry->d_lock here, in dentry_lru_isolate()
1103 if (!spin_trylock(&dentry->d_lock)) in dentry_lru_isolate()
1111 if (dentry->d_lockref.count) { in dentry_lru_isolate()
1113 spin_unlock(&dentry->d_lock); in dentry_lru_isolate()
1117 if (dentry->d_flags & DCACHE_REFERENCED) { in dentry_lru_isolate()
1118 dentry->d_flags &= ~DCACHE_REFERENCED; in dentry_lru_isolate()
1119 spin_unlock(&dentry->d_lock); in dentry_lru_isolate()
1123 * this point, we've dropped the dentry->d_lock but keep the in dentry_lru_isolate()
1144 spin_unlock(&dentry->d_lock); in dentry_lru_isolate()
1150 * prune_dcache_sb - shrink the dcache
1154 * Attempt to shrink the superblock dcache LRU by @sc->nr_to_scan entries. This
1166 freed = list_lru_shrink_walk(&sb->s_dentry_lru, sc, in prune_dcache_sb()
1179 * we are inverting the lru lock/dentry->d_lock here, in dentry_lru_isolate_shrink()
1183 if (!spin_trylock(&dentry->d_lock)) in dentry_lru_isolate_shrink()
1187 spin_unlock(&dentry->d_lock); in dentry_lru_isolate_shrink()
1194 * shrink_dcache_sb - shrink dcache for a superblock
1205 list_lru_walk(&sb->s_dentry_lru, in shrink_dcache_sb()
1208 } while (list_lru_count(&sb->s_dentry_lru) > 0); in shrink_dcache_sb()
1213 * enum d_walk_ret - action to take during tree walk
1227 * d_walk - walk the dentry tree
1245 spin_lock(&this_parent->d_lock); in d_walk()
1262 if (unlikely(dentry->d_flags & DCACHE_DENTRY_CURSOR)) in d_walk()
1265 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); in d_walk()
1272 spin_unlock(&dentry->d_lock); in d_walk()
1278 spin_unlock(&dentry->d_lock); in d_walk()
1282 if (!hlist_empty(&dentry->d_children)) { in d_walk()
1283 spin_unlock(&this_parent->d_lock); in d_walk()
1284 spin_release(&dentry->d_lock.dep_map, _RET_IP_); in d_walk()
1286 spin_acquire(&this_parent->d_lock.dep_map, 0, 1, _RET_IP_); in d_walk()
1289 spin_unlock(&dentry->d_lock); in d_walk()
1298 this_parent = dentry->d_parent; in d_walk()
1300 spin_unlock(&dentry->d_lock); in d_walk()
1301 spin_lock(&this_parent->d_lock); in d_walk()
1308 if (likely(!(dentry->d_flags & DCACHE_DENTRY_KILLED))) { in d_walk()
1320 spin_unlock(&this_parent->d_lock); in d_walk()
1325 spin_unlock(&this_parent->d_lock); in d_walk()
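
d_walk() hides all of the locking and retry logic above behind a callback; a hypothetical counting walk (count_one() is made up) would look like:

	static enum d_walk_ret count_one(void *data, struct dentry *dentry)
	{
		(*(unsigned long *)data)++;
		return D_WALK_CONTINUE;
	}

	/* ... then, from inside dcache.c: */
	unsigned long n = 0;
	d_walk(root, &n, count_one);
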
1342 struct path path = { .mnt = info->mnt, .dentry = dentry }; in path_check_mount()
1347 info->mounted = 1; in path_check_mount()
1354 * path_has_submounts - check for mounts over a dentry in the
1363 struct check_mount data = { .mnt = parent->mnt, .mounted = 0 }; in path_has_submounts()
1366 d_walk(parent->dentry, &data, path_check_mount); in path_has_submounts()
1384 int ret = -ENOENT; in d_set_mounted()
1386 for (p = dentry->d_parent; !IS_ROOT(p); p = p->d_parent) { in d_set_mounted()
1388 spin_lock(&p->d_lock); in d_set_mounted()
1390 spin_unlock(&p->d_lock); in d_set_mounted()
1393 spin_unlock(&p->d_lock); in d_set_mounted()
1395 spin_lock(&dentry->d_lock); in d_set_mounted()
1397 ret = -EBUSY; in d_set_mounted()
1399 dentry->d_flags |= DCACHE_MOUNTED; in d_set_mounted()
1403 spin_unlock(&dentry->d_lock); in d_set_mounted()
1413 * whenever the d_children list is non-empty and continue
1438 if (data->start == dentry) in select_collect()
1441 if (dentry->d_flags & DCACHE_SHRINK_LIST) { in select_collect()
1442 data->found++; in select_collect()
1443 } else if (!dentry->d_lockref.count) { in select_collect()
1444 to_shrink_list(dentry, &data->dispose); in select_collect()
1445 data->found++; in select_collect()
1446 } else if (dentry->d_lockref.count < 0) { in select_collect()
1447 data->found++; in select_collect()
1454 if (!list_empty(&data->dispose)) in select_collect()
1465 if (data->start == dentry) in select_collect2()
1468 if (!dentry->d_lockref.count) { in select_collect2()
1469 if (dentry->d_flags & DCACHE_SHRINK_LIST) { in select_collect2()
1471 data->victim = dentry; in select_collect2()
1474 to_shrink_list(dentry, &data->dispose); in select_collect2()
1481 if (!list_empty(&data->dispose)) in select_collect2()
1488 * shrink_dcache_parent - prune dcache
1512 spin_lock(&data.victim->d_lock); in shrink_dcache_parent()
1514 spin_unlock(&data.victim->d_lock); in shrink_dcache_parent()
1529 if (!hlist_empty(&dentry->d_children)) in umount_check()
1533 if (dentry == _data && dentry->d_lockref.count == 1) in umount_check()
1539 dentry->d_inode ? in umount_check()
1540 dentry->d_inode->i_ino : 0UL, in umount_check()
1542 dentry->d_lockref.count, in umount_check()
1543 dentry->d_sb->s_type->name, in umount_check()
1544 dentry->d_sb->s_id); in umount_check()
1563 rwsem_assert_held_write(&sb->s_umount); in shrink_dcache_for_umount()
1565 dentry = sb->s_root; in shrink_dcache_for_umount()
1566 sb->s_root = NULL; in shrink_dcache_for_umount()
1569 while (!hlist_bl_empty(&sb->s_roots)) { in shrink_dcache_for_umount()
1570 dentry = dget(hlist_bl_entry(hlist_bl_first(&sb->s_roots), struct dentry, d_hash)); in shrink_dcache_for_umount()
1586 * d_invalidate - detach submounts, prune dcache, and drop
1592 spin_lock(&dentry->d_lock); in d_invalidate()
1594 spin_unlock(&dentry->d_lock); in d_invalidate()
1598 spin_unlock(&dentry->d_lock); in d_invalidate()
1601 if (!dentry->d_inode) in d_invalidate()
1621 * __d_alloc - allocate a dcache entry
1636 dentry = kmem_cache_alloc_lru(dentry_cache, &sb->s_dentry_lru, in __d_alloc()
1642 * We guarantee that the inline name is always NUL-terminated. in __d_alloc()
1647 dentry->d_iname[DNAME_INLINE_LEN-1] = 0; in __d_alloc()
1650 dname = dentry->d_iname; in __d_alloc()
1651 } else if (name->len > DNAME_INLINE_LEN-1) { in __d_alloc()
1653 struct external_name *p = kmalloc(size + name->len, in __d_alloc()
1660 atomic_set(&p->u.count, 1); in __d_alloc()
1661 dname = p->name; in __d_alloc()
1663 dname = dentry->d_iname; in __d_alloc()
1666 dentry->d_name.len = name->len; in __d_alloc()
1667 dentry->d_name.hash = name->hash; in __d_alloc()
1668 memcpy(dname, name->name, name->len); in __d_alloc()
1669 dname[name->len] = 0; in __d_alloc()
1672 smp_store_release(&dentry->d_name.name, dname); /* ^^^ */ in __d_alloc()
1674 dentry->d_lockref.count = 1; in __d_alloc()
1675 dentry->d_flags = 0; in __d_alloc()
1676 spin_lock_init(&dentry->d_lock); in __d_alloc()
1677 seqcount_spinlock_init(&dentry->d_seq, &dentry->d_lock); in __d_alloc()
1678 dentry->d_inode = NULL; in __d_alloc()
1679 dentry->d_parent = dentry; in __d_alloc()
1680 dentry->d_sb = sb; in __d_alloc()
1681 dentry->d_op = NULL; in __d_alloc()
1682 dentry->d_fsdata = NULL; in __d_alloc()
1683 INIT_HLIST_BL_NODE(&dentry->d_hash); in __d_alloc()
1684 INIT_LIST_HEAD(&dentry->d_lru); in __d_alloc()
1685 INIT_HLIST_HEAD(&dentry->d_children); in __d_alloc()
1686 INIT_HLIST_NODE(&dentry->d_u.d_alias); in __d_alloc()
1687 INIT_HLIST_NODE(&dentry->d_sib); in __d_alloc()
1688 d_set_d_op(dentry, dentry->d_sb->s_d_op); in __d_alloc()
1690 if (dentry->d_op && dentry->d_op->d_init) { in __d_alloc()
1691 err = dentry->d_op->d_init(dentry); in __d_alloc()
1706 * d_alloc - allocate a dcache entry
1716 struct dentry *dentry = __d_alloc(parent->d_sb, name); in d_alloc()
1719 spin_lock(&parent->d_lock); in d_alloc()
1724 dentry->d_parent = dget_dlock(parent); in d_alloc()
1725 hlist_add_head(&dentry->d_sib, &parent->d_children); in d_alloc()
1726 spin_unlock(&parent->d_lock); in d_alloc()
1740 struct dentry *dentry = d_alloc_anon(parent->d_sb); in d_alloc_cursor()
1742 dentry->d_flags |= DCACHE_DENTRY_CURSOR; in d_alloc_cursor()
1743 dentry->d_parent = dget(parent); in d_alloc_cursor()
1749 * d_alloc_pseudo - allocate a dentry (for lookup-less filesystems)
1755 * This is used for pipes, sockets et al. - the stuff that should
1770 dentry->d_flags |= DCACHE_NORCU; in d_alloc_pseudo()
1771 if (!sb->s_d_op) in d_alloc_pseudo()
1789 WARN_ON_ONCE(dentry->d_op); in d_set_d_op()
1790 WARN_ON_ONCE(dentry->d_flags & (DCACHE_OP_HASH | in d_set_d_op()
1796 dentry->d_op = op; in d_set_d_op()
1799 if (op->d_hash) in d_set_d_op()
1800 dentry->d_flags |= DCACHE_OP_HASH; in d_set_d_op()
1801 if (op->d_compare) in d_set_d_op()
1802 dentry->d_flags |= DCACHE_OP_COMPARE; in d_set_d_op()
1803 if (op->d_revalidate) in d_set_d_op()
1804 dentry->d_flags |= DCACHE_OP_REVALIDATE; in d_set_d_op()
1805 if (op->d_weak_revalidate) in d_set_d_op()
1806 dentry->d_flags |= DCACHE_OP_WEAK_REVALIDATE; in d_set_d_op()
1807 if (op->d_delete) in d_set_d_op()
1808 dentry->d_flags |= DCACHE_OP_DELETE; in d_set_d_op()
1809 if (op->d_prune) in d_set_d_op()
1810 dentry->d_flags |= DCACHE_OP_PRUNE; in d_set_d_op()
1811 if (op->d_real) in d_set_d_op()
1812 dentry->d_flags |= DCACHE_OP_REAL; in d_set_d_op()
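
Each DCACHE_OP_* bit mirrors one non-NULL hook so hot paths can test d_flags instead of dereferencing ->d_op. A hedged example table (myfs_d_revalidate() is hypothetical; always_delete_dentry() is a stock helper):

	static const struct dentry_operations myfs_dentry_ops = {
		.d_revalidate	= myfs_d_revalidate,	/* sets DCACHE_OP_REVALIDATE */
		.d_delete	= always_delete_dentry,	/* sets DCACHE_OP_DELETE */
	};
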
1824 if (S_ISDIR(inode->i_mode)) { in d_flags_for_inode()
1826 if (unlikely(!(inode->i_opflags & IOP_LOOKUP))) { in d_flags_for_inode()
1827 if (unlikely(!inode->i_op->lookup)) in d_flags_for_inode()
1830 inode->i_opflags |= IOP_LOOKUP; in d_flags_for_inode()
1835 if (unlikely(!(inode->i_opflags & IOP_NOFOLLOW))) { in d_flags_for_inode()
1836 if (unlikely(inode->i_op->get_link)) { in d_flags_for_inode()
1840 inode->i_opflags |= IOP_NOFOLLOW; in d_flags_for_inode()
1843 if (unlikely(!S_ISREG(inode->i_mode))) in d_flags_for_inode()
1857 spin_lock(&dentry->d_lock); in __d_instantiate()
1862 if ((dentry->d_flags & in __d_instantiate()
1865 hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry); in __d_instantiate()
1866 raw_write_seqcount_begin(&dentry->d_seq); in __d_instantiate()
1868 raw_write_seqcount_end(&dentry->d_seq); in __d_instantiate()
1870 spin_unlock(&dentry->d_lock); in __d_instantiate()
1874 * d_instantiate - fill in inode information for a dentry
1890 BUG_ON(!hlist_unhashed(&entry->d_u.d_alias)); in d_instantiate()
1893 spin_lock(&inode->i_lock); in d_instantiate()
1895 spin_unlock(&inode->i_lock); in d_instantiate()
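
The classic caller is a filesystem's ->create() path: allocate the inode, then bind it to the still-negative dentry. A hedged sketch, assuming a hypothetical myfs_new_inode() and the current ->create() signature:

	static int myfs_create(struct mnt_idmap *idmap, struct inode *dir,
			       struct dentry *dentry, umode_t mode, bool excl)
	{
		struct inode *inode = myfs_new_inode(dir, mode);

		if (IS_ERR(inode))
			return PTR_ERR(inode);
		d_instantiate(dentry, inode);	/* dentry becomes positive here */
		return 0;
	}
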
1902 * with lockdep-related part of unlock_new_inode() done before
1903 * anything else. Use that instead of open-coding d_instantiate()/
1908 BUG_ON(!hlist_unhashed(&entry->d_u.d_alias)); in d_instantiate_new()
1912 spin_lock(&inode->i_lock); in d_instantiate_new()
1914 WARN_ON(!(inode->i_state & I_NEW)); in d_instantiate_new()
1915 inode->i_state &= ~I_NEW & ~I_CREATING; in d_instantiate_new()
1923 spin_unlock(&inode->i_lock); in d_instantiate_new()
1932 res = d_alloc_anon(root_inode->i_sb); in d_make_root()
1948 return ERR_PTR(-ESTALE); in __d_obtain_alias()
1952 sb = inode->i_sb; in __d_obtain_alias()
1960 res = ERR_PTR(-ENOMEM); in __d_obtain_alias()
1965 spin_lock(&inode->i_lock); in __d_obtain_alias()
1973 spin_lock(&new->d_lock); in __d_obtain_alias()
1975 hlist_add_head(&new->d_u.d_alias, &inode->i_dentry); in __d_obtain_alias()
1977 hlist_bl_lock(&sb->s_roots); in __d_obtain_alias()
1978 hlist_bl_add_head(&new->d_hash, &sb->s_roots); in __d_obtain_alias()
1979 hlist_bl_unlock(&sb->s_roots); in __d_obtain_alias()
1981 spin_unlock(&new->d_lock); in __d_obtain_alias()
1982 spin_unlock(&inode->i_lock); in __d_obtain_alias()
1983 inode = NULL; /* consumed by new->d_inode */ in __d_obtain_alias()
1986 spin_unlock(&inode->i_lock); in __d_obtain_alias()
1996 * d_obtain_alias - find or allocate a DISCONNECTED dentry for a given inode
2011 * with a %NULL @inode replaced by ERR_PTR(-ESTALE).
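
Its main consumers are export_operations; a hedged ->fh_to_dentry() sketch (myfs_iget() is hypothetical):

	static struct dentry *myfs_fh_to_dentry(struct super_block *sb,
						struct fid *fid, int fh_len, int fh_type)
	{
		/* d_obtain_alias() accepts NULL and ERR_PTR() inodes itself */
		return d_obtain_alias(myfs_iget(sb, fid->i32.ino));
	}
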
2020 * d_obtain_root - find or allocate a dentry for a given inode
2032 * replaced by ERR_PTR(-ESTALE).
2041 * d_add_ci - lookup or allocate new dentry with case-exact name
2042 * @inode: the inode case-insensitive lookup has found
2044 * @name: the case-exact name to be associated with the returned dentry
2046 * This is to avoid filling the dcache with case-insensitive names to the
2048 * case-insensitive filesystems.
2050 * For a case-insensitive lookup match and if the case-exact dentry
2065 found = d_hash_and_lookup(dentry->d_parent, name); in d_add_ci()
2071 found = d_alloc_parallel(dentry->d_parent, name, in d_add_ci()
2072 dentry->d_wait); in d_add_ci()
2078 found = d_alloc(dentry->d_parent, name); in d_add_ci()
2081 return ERR_PTR(-ENOMEM); in d_add_ci()
2095 * d_same_name - compare dentry name with case-exact name
2098 * @name: the case-exact name to be associated with the returned dentry
2105 if (likely(!(parent->d_flags & DCACHE_OP_COMPARE))) { in d_same_name()
2106 if (dentry->d_name.len != name->len) in d_same_name()
2108 return dentry_cmp(dentry, name->name, name->len) == 0; in d_same_name()
2110 return parent->d_op->d_compare(dentry, in d_same_name()
2111 dentry->d_name.len, dentry->d_name.name, in d_same_name()
2125 u64 hashlen = name->hash_len; in __d_lookup_rcu_op_compare()
2136 seq = raw_seqcount_begin(&dentry->d_seq); in __d_lookup_rcu_op_compare()
2137 if (dentry->d_parent != parent) in __d_lookup_rcu_op_compare()
2141 if (dentry->d_name.hash != hashlen_hash(hashlen)) in __d_lookup_rcu_op_compare()
2143 tlen = dentry->d_name.len; in __d_lookup_rcu_op_compare()
2144 tname = dentry->d_name.name; in __d_lookup_rcu_op_compare()
2146 if (read_seqcount_retry(&dentry->d_seq, seq)) { in __d_lookup_rcu_op_compare()
2150 if (parent->d_op->d_compare(dentry, tlen, tname, name) != 0) in __d_lookup_rcu_op_compare()
2159 * __d_lookup_rcu - search for a dentry (racy, store-free)
2165 * __d_lookup_rcu is the dcache lookup function for rcu-walk name
2166 * resolution (store-free path walking) design described in
2167 * Documentation/filesystems/path-lookup.txt.
2171 * __d_lookup_rcu must only be used in rcu-walk mode, ie. with vfsmount lock
2188 u64 hashlen = name->hash_len; in __d_lookup_rcu()
2189 const unsigned char *str = name->name; in __d_lookup_rcu()
2201 if (unlikely(parent->d_flags & DCACHE_OP_COMPARE)) in __d_lookup_rcu()
2212 * false-negative result. d_lookup() protects against concurrent in __d_lookup_rcu()
2215 * See Documentation/filesystems/path-lookup.txt for more details. in __d_lookup_rcu()
2235 * we are still guaranteed NUL-termination of ->d_name.name. in __d_lookup_rcu()
2237 seq = raw_seqcount_begin(&dentry->d_seq); in __d_lookup_rcu()
2238 if (dentry->d_parent != parent) in __d_lookup_rcu()
2242 if (dentry->d_name.hash_len != hashlen) in __d_lookup_rcu()
2253 * d_lookup - search for a dentry
2279 * __d_lookup - search for a dentry (racy)
2285 * false-negative result due to unrelated rename activity.
2295 unsigned int hash = name->hash; in __d_lookup()
2316 * false-negative result. d_lookup() protects against concurrent in __d_lookup()
2319 * See Documentation/filesystems/path-lookup.txt for more details. in __d_lookup()
2325 if (dentry->d_name.hash != hash) in __d_lookup()
2328 spin_lock(&dentry->d_lock); in __d_lookup()
2329 if (dentry->d_parent != parent) in __d_lookup()
2337 dentry->d_lockref.count++; in __d_lookup()
2339 spin_unlock(&dentry->d_lock); in __d_lookup()
2342 spin_unlock(&dentry->d_lock); in __d_lookup()
2350 * d_hash_and_lookup - hash the qstr then search for a dentry
2354 * On lookup failure NULL is returned; on bad name - ERR_PTR(-error)
2359 * Check for a fs-specific hash function. Note that we must in d_hash_and_lookup()
2360 * calculate the standard hash first, as the d_op->d_hash() in d_hash_and_lookup()
2363 name->hash = full_name_hash(dir, name->name, name->len); in d_hash_and_lookup()
2364 if (dir->d_flags & DCACHE_OP_HASH) { in d_hash_and_lookup()
2365 int err = dir->d_op->d_hash(dir, name); in d_hash_and_lookup()
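
Usage sketch: fill in a qstr, let the helper apply any per-filesystem ->d_hash(), then search:

	struct qstr this = QSTR_INIT("child", 5);
	struct dentry *found = d_hash_and_lookup(dir, &this);

	if (!IS_ERR_OR_NULL(found))
		dput(found);	/* a successful lookup took a reference */
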
2375 * - turn this dentry into a negative dentry
2376 * - unhash this dentry and free it.
2387 * d_delete - delete a dentry
2396 struct inode *inode = dentry->d_inode; in d_delete()
2398 spin_lock(&inode->i_lock); in d_delete()
2399 spin_lock(&dentry->d_lock); in d_delete()
2403 if (dentry->d_lockref.count == 1) { in d_delete()
2404 dentry->d_flags &= ~DCACHE_CANT_MOUNT; in d_delete()
2408 spin_unlock(&dentry->d_lock); in d_delete()
2409 spin_unlock(&inode->i_lock); in d_delete()
2416 struct hlist_bl_head *b = d_hash(entry->d_name.hash); in __d_rehash()
2419 hlist_bl_add_head_rcu(&entry->d_hash, b); in __d_rehash()
2424 * d_rehash - add an entry back to the hash
2432 spin_lock(&entry->d_lock); in d_rehash()
2434 spin_unlock(&entry->d_lock); in d_rehash()
2442 unsigned n = dir->i_dir_seq; in start_dir_add()
2443 if (!(n & 1) && cmpxchg(&dir->i_dir_seq, n, n + 1) == n) in start_dir_add()
2452 smp_store_release(&dir->i_dir_seq, n + 2); in end_dir_add()
2461 add_wait_queue(dentry->d_wait, &wait); in d_wait_lookup()
2464 spin_unlock(&dentry->d_lock); in d_wait_lookup()
2466 spin_lock(&dentry->d_lock); in d_wait_lookup()
2475 unsigned int hash = name->hash; in d_alloc_parallel()
2483 return ERR_PTR(-ENOMEM); in d_alloc_parallel()
2487 seq = smp_load_acquire(&parent->d_inode->i_dir_seq); in d_alloc_parallel()
2491 if (!lockref_get_not_dead(&dentry->d_lockref)) { in d_alloc_parallel()
2495 if (read_seqcount_retry(&dentry->d_seq, d_seq)) { in d_alloc_parallel()
2515 if (unlikely(READ_ONCE(parent->d_inode->i_dir_seq) != seq)) { in d_alloc_parallel()
2523 * any potential in-lookup matches are going to stay here until in d_alloc_parallel()
2528 if (dentry->d_name.hash != hash) in d_alloc_parallel()
2530 if (dentry->d_parent != parent) in d_alloc_parallel()
2536 if (!lockref_get_not_dead(&dentry->d_lockref)) { in d_alloc_parallel()
2546 spin_lock(&dentry->d_lock); in d_alloc_parallel()
2549 * it's not in-lookup anymore; in principle we should repeat in d_alloc_parallel()
2554 if (unlikely(dentry->d_name.hash != hash)) in d_alloc_parallel()
2556 if (unlikely(dentry->d_parent != parent)) in d_alloc_parallel()
2563 spin_unlock(&dentry->d_lock); in d_alloc_parallel()
2568 /* we can't take ->d_lock here; it's OK, though. */ in d_alloc_parallel()
2569 new->d_flags |= DCACHE_PAR_LOOKUP; in d_alloc_parallel()
2570 new->d_wait = wq; in d_alloc_parallel()
2571 hlist_bl_add_head(&new->d_u.d_in_lookup_hash, b); in d_alloc_parallel()
2575 spin_unlock(&dentry->d_lock); in d_alloc_parallel()
2582 * - Unhash the dentry
2583 * - Retrieve and clear the waitqueue head in dentry
2584 * - Return the waitqueue head
2591 lockdep_assert_held(&dentry->d_lock); in __d_lookup_unhash()
2593 b = in_lookup_hash(dentry->d_parent, dentry->d_name.hash); in __d_lookup_unhash()
2595 dentry->d_flags &= ~DCACHE_PAR_LOOKUP; in __d_lookup_unhash()
2596 __hlist_bl_del(&dentry->d_u.d_in_lookup_hash); in __d_lookup_unhash()
2597 d_wait = dentry->d_wait; in __d_lookup_unhash()
2598 dentry->d_wait = NULL; in __d_lookup_unhash()
2600 INIT_HLIST_NODE(&dentry->d_u.d_alias); in __d_lookup_unhash()
2601 INIT_LIST_HEAD(&dentry->d_lru); in __d_lookup_unhash()
2607 spin_lock(&dentry->d_lock); in __d_lookup_unhash_wake()
2609 spin_unlock(&dentry->d_lock); in __d_lookup_unhash_wake()
2613 /* inode->i_lock held if inode is non-NULL */
2620 spin_lock(&dentry->d_lock); in __d_add()
2622 dir = dentry->d_parent->d_inode; in __d_add()
2628 hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry); in __d_add()
2629 raw_write_seqcount_begin(&dentry->d_seq); in __d_add()
2631 raw_write_seqcount_end(&dentry->d_seq); in __d_add()
2637 spin_unlock(&dentry->d_lock); in __d_add()
2639 spin_unlock(&inode->i_lock); in __d_add()
2643 * d_add - add dentry to hash queues
2655 spin_lock(&inode->i_lock); in d_add()
2662 * d_exact_alias - find and hash an exact unhashed alias
2675 unsigned int hash = entry->d_name.hash; in d_exact_alias()
2677 spin_lock(&inode->i_lock); in d_exact_alias()
2678 hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) { in d_exact_alias()
2680 * Don't need alias->d_lock here, because aliases with in d_exact_alias()
2681 * d_parent == entry->d_parent are not subject to name or in d_exact_alias()
2684 if (alias->d_name.hash != hash) in d_exact_alias()
2686 if (alias->d_parent != entry->d_parent) in d_exact_alias()
2688 if (!d_same_name(alias, entry->d_parent, &entry->d_name)) in d_exact_alias()
2690 spin_lock(&alias->d_lock); in d_exact_alias()
2692 spin_unlock(&alias->d_lock); in d_exact_alias()
2697 spin_unlock(&alias->d_lock); in d_exact_alias()
2699 spin_unlock(&inode->i_lock); in d_exact_alias()
2702 spin_unlock(&inode->i_lock); in d_exact_alias()
2714 swap(target->d_name.name, dentry->d_name.name); in swap_names()
2720 memcpy(target->d_iname, dentry->d_name.name, in swap_names()
2721 dentry->d_name.len + 1); in swap_names()
2722 dentry->d_name.name = target->d_name.name; in swap_names()
2723 target->d_name.name = target->d_iname; in swap_names()
2731 memcpy(dentry->d_iname, target->d_name.name, in swap_names()
2732 target->d_name.len + 1); in swap_names()
2733 target->d_name.name = dentry->d_name.name; in swap_names()
2734 dentry->d_name.name = dentry->d_iname; in swap_names()
2742 swap(((long *) &dentry->d_iname)[i], in swap_names()
2743 ((long *) &target->d_iname)[i]); in swap_names()
2747 swap(dentry->d_name.hash_len, target->d_name.hash_len); in swap_names()
2756 atomic_inc(&external_name(target)->u.count); in copy_name()
2757 dentry->d_name = target->d_name; in copy_name()
2759 memcpy(dentry->d_iname, target->d_name.name, in copy_name()
2760 target->d_name.len + 1); in copy_name()
2761 dentry->d_name.name = dentry->d_iname; in copy_name()
2762 dentry->d_name.hash_len = target->d_name.hash_len; in copy_name()
2764 if (old_name && likely(atomic_dec_and_test(&old_name->u.count))) in copy_name()
2769 * __d_move - move a dentry
2777 * and the sb->s_vfs_rename_mutex if they differ. See lock_rename().
2787 WARN_ON(!dentry->d_inode); in __d_move()
2792 old_parent = dentry->d_parent; in __d_move()
2796 spin_lock(&target->d_parent->d_lock); in __d_move()
2798 /* target is not a descendant of dentry->d_parent */ in __d_move()
2799 spin_lock(&target->d_parent->d_lock); in __d_move()
2800 spin_lock_nested(&old_parent->d_lock, DENTRY_D_LOCK_NESTED); in __d_move()
2803 spin_lock(&old_parent->d_lock); in __d_move()
2805 spin_lock_nested(&target->d_parent->d_lock, in __d_move()
2808 spin_lock_nested(&dentry->d_lock, 2); in __d_move()
2809 spin_lock_nested(&target->d_lock, 3); in __d_move()
2812 dir = target->d_parent->d_inode; in __d_move()
2817 write_seqcount_begin(&dentry->d_seq); in __d_move()
2818 write_seqcount_begin_nested(&target->d_seq, DENTRY_D_LOCK_NESTED); in __d_move()
2827 dentry->d_parent = target->d_parent; in __d_move()
2830 target->d_hash.pprev = NULL; in __d_move()
2831 dentry->d_parent->d_lockref.count++; in __d_move()
2833 WARN_ON(!--old_parent->d_lockref.count); in __d_move()
2835 target->d_parent = old_parent; in __d_move()
2837 if (!hlist_unhashed(&target->d_sib)) in __d_move()
2838 __hlist_del(&target->d_sib); in __d_move()
2839 hlist_add_head(&target->d_sib, &target->d_parent->d_children); in __d_move()
2843 if (!hlist_unhashed(&dentry->d_sib)) in __d_move()
2844 __hlist_del(&dentry->d_sib); in __d_move()
2845 hlist_add_head(&dentry->d_sib, &dentry->d_parent->d_children); in __d_move()
2850 write_seqcount_end(&target->d_seq); in __d_move()
2851 write_seqcount_end(&dentry->d_seq); in __d_move()
2856 if (dentry->d_parent != old_parent) in __d_move()
2857 spin_unlock(&dentry->d_parent->d_lock); in __d_move()
2859 spin_unlock(&old_parent->d_lock); in __d_move()
2860 spin_unlock(&target->d_lock); in __d_move()
2861 spin_unlock(&dentry->d_lock); in __d_move()
2865 * d_move - move a dentry
2882 * d_exchange - exchange two dentries
2890 WARN_ON(!dentry1->d_inode); in d_exchange()
2891 WARN_ON(!dentry2->d_inode); in d_exchange()
2901 * d_ancestor - search for an ancestor
2912 for (p = p2; !IS_ROOT(p); p = p->d_parent) { in d_ancestor()
2913 if (p->d_parent == p1) in d_ancestor()
2923 * dentry->d_parent->d_inode->i_mutex, and rename_lock
2932 int ret = -ESTALE; in __d_unalias()
2935 if (alias->d_parent == dentry->d_parent) in __d_unalias()
2939 if (!mutex_trylock(&dentry->d_sb->s_vfs_rename_mutex)) in __d_unalias()
2941 m1 = &dentry->d_sb->s_vfs_rename_mutex; in __d_unalias()
2942 if (!inode_trylock_shared(alias->d_parent->d_inode)) in __d_unalias()
2944 m2 = &alias->d_parent->d_inode->i_rwsem; in __d_unalias()
2957 * d_splice_alias - splice a disconnected dentry into the tree if one exists
2965 * If a non-IS_ROOT directory is found, the filesystem is corrupt, and
2972 * is returned. This matches the expected return value of ->lookup.
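
That return convention makes the idiomatic ->lookup() tail a one-liner; a hedged sketch (myfs_iget_by_name() is hypothetical):

	static struct dentry *myfs_lookup(struct inode *dir, struct dentry *dentry,
					  unsigned int flags)
	{
		struct inode *inode = myfs_iget_by_name(dir, &dentry->d_name);

		/* d_splice_alias() copes with a NULL or ERR_PTR() inode directly */
		return d_splice_alias(inode, dentry);
	}
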
2990 spin_lock(&inode->i_lock); in d_splice_alias()
2991 if (S_ISDIR(inode->i_mode)) { in d_splice_alias()
2995 spin_unlock(&inode->i_lock); in d_splice_alias()
3000 new = ERR_PTR(-ELOOP); in d_splice_alias()
3004 dentry->d_name.name, in d_splice_alias()
3005 inode->i_sb->s_type->name, in d_splice_alias()
3006 inode->i_sb->s_id); in d_splice_alias()
3008 struct dentry *old_parent = dget(new->d_parent); in d_splice_alias()
3037 * is_subdir - is new dentry a subdirectory of old_dentry
3074 if (d_unhashed(dentry) || !dentry->d_inode) in d_genocide_kill()
3077 if (!(dentry->d_flags & DCACHE_GENOCIDE)) { in d_genocide_kill()
3078 dentry->d_flags |= DCACHE_GENOCIDE; in d_genocide_kill()
3079 dentry->d_lockref.count--; in d_genocide_kill()
3092 struct dentry *dentry = file->f_path.dentry; in d_mark_tmpfile()
3094 BUG_ON(dentry->d_name.name != dentry->d_iname || in d_mark_tmpfile()
3095 !hlist_unhashed(&dentry->d_u.d_alias) || in d_mark_tmpfile()
3097 spin_lock(&dentry->d_parent->d_lock); in d_mark_tmpfile()
3098 spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); in d_mark_tmpfile()
3099 dentry->d_name.len = sprintf(dentry->d_iname, "#%llu", in d_mark_tmpfile()
3100 (unsigned long long)inode->i_ino); in d_mark_tmpfile()
3101 spin_unlock(&dentry->d_lock); in d_mark_tmpfile()
3102 spin_unlock(&dentry->d_parent->d_lock); in d_mark_tmpfile()
3108 struct dentry *dentry = file->f_path.dentry; in d_tmpfile()
3127 seq = raw_seqcount_begin(&dentry->d_seq); in d_parent_ino()
3128 parent = READ_ONCE(dentry->d_parent); in d_parent_ino()
3131 ret = iparent->i_ino; in d_parent_ino()
3132 if (!read_seqcount_retry(&dentry->d_seq, seq)) in d_parent_ino()
3137 spin_lock(&dentry->d_lock); in d_parent_ino()
3138 ret = dentry->d_parent->d_inode->i_ino; in d_parent_ino()
3139 spin_unlock(&dentry->d_lock); in d_parent_ino()
3172 d_hash_shift = 32 - d_hash_shift; in dcache_init_early()
3203 d_hash_shift = 32 - d_hash_shift; in dcache_init()