fs: dcache scale dentry refcount

Make d_count non-atomic and protect it with d_lock. This allows us to ensure
that a zero-refcount dentry remains at zero without taking dcache_lock. It is
also fairly natural once we start protecting many other dentry members with
d_lock.

Signed-off-by: Nick Piggin <npiggin@kernel.dk>
This commit is contained in:
Nick Piggin
2011-01-07 17:49:32 +11:00
parent 2304450783
commit b7ab39f631
21 changed files with 126 additions and 69 deletions

View File

@@ -45,6 +45,7 @@
* - d_flags
* - d_name
* - d_lru
* - d_count
*
* Ordering:
* dcache_lock
@@ -125,6 +126,7 @@ static void __d_free(struct rcu_head *head)
*/
static void d_free(struct dentry *dentry)
{
BUG_ON(dentry->d_count);
this_cpu_dec(nr_dentry);
if (dentry->d_op && dentry->d_op->d_release)
dentry->d_op->d_release(dentry);
@@ -222,8 +224,11 @@ static struct dentry *d_kill(struct dentry *dentry)
struct dentry *parent;
list_del(&dentry->d_u.d_child);
/*drops the locks, at that point nobody can reach this dentry */
dentry_iput(dentry);
/*
* dentry_iput drops the locks, at which point nobody (except
* transient RCU lookups) can reach this dentry.
*/
if (IS_ROOT(dentry))
parent = NULL;
else
@@ -303,13 +308,23 @@ void dput(struct dentry *dentry)
return;
repeat:
if (atomic_read(&dentry->d_count) == 1)
if (dentry->d_count == 1)
might_sleep();
if (!atomic_dec_and_lock(&dentry->d_count, &dcache_lock))
return;
spin_lock(&dentry->d_lock);
if (atomic_read(&dentry->d_count)) {
if (dentry->d_count == 1) {
if (!spin_trylock(&dcache_lock)) {
/*
* Something of a livelock possibility we could avoid
* by taking dcache_lock and trying again, but we
* want to reduce dcache_lock anyway so this will
* get improved.
*/
spin_unlock(&dentry->d_lock);
goto repeat;
}
}
dentry->d_count--;
if (dentry->d_count) {
spin_unlock(&dentry->d_lock);
spin_unlock(&dcache_lock);
return;
@@ -389,7 +404,7 @@ int d_invalidate(struct dentry * dentry)
* working directory or similar).
*/
spin_lock(&dentry->d_lock);
if (atomic_read(&dentry->d_count) > 1) {
if (dentry->d_count > 1) {
if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) {
spin_unlock(&dentry->d_lock);
spin_unlock(&dcache_lock);
@@ -404,29 +419,61 @@ int d_invalidate(struct dentry * dentry)
}
EXPORT_SYMBOL(d_invalidate);
/* This should be called _only_ with dcache_lock held */
/* This must be called with dcache_lock and d_lock held */
static inline struct dentry * __dget_locked_dlock(struct dentry *dentry)
{
atomic_inc(&dentry->d_count);
dentry->d_count++;
dentry_lru_del(dentry);
return dentry;
}
/* This should be called _only_ with dcache_lock held */
static inline struct dentry * __dget_locked(struct dentry *dentry)
{
atomic_inc(&dentry->d_count);
spin_lock(&dentry->d_lock);
dentry_lru_del(dentry);
__dget_locked_dlock(dentry);
spin_unlock(&dentry->d_lock);
return dentry;
}
/*
 * Take an extra reference on @dentry. Caller must hold dcache_lock and
 * the dentry's d_lock (see __dget_locked_dlock); also removes the dentry
 * from the LRU list.
 */
struct dentry * dget_locked_dlock(struct dentry *dentry)
{
	return __dget_locked_dlock(dentry);
}
/*
 * Take an extra reference on @dentry. Caller must hold dcache_lock;
 * the dentry's d_lock is taken (and released) internally.
 */
struct dentry * dget_locked(struct dentry *dentry)
{
	return __dget_locked(dentry);
}
EXPORT_SYMBOL(dget_locked);
/*
 * dget_parent - take a reference on @dentry's parent
 *
 * Returns the parent with its d_count raised, or NULL if d_parent is
 * NULL. No locks need be held by the caller.
 */
struct dentry *dget_parent(struct dentry *dentry)
{
	struct dentry *ret;

repeat:
	/*
	 * Hold the child's d_lock so d_parent cannot change under us while
	 * we take the reference.
	 */
	spin_lock(&dentry->d_lock);
	ret = dentry->d_parent;
	if (!ret)
		goto out;
	if (dentry == ret) {
		/* dentry is its own parent: its d_lock is already held */
		ret->d_count++;
		goto out;
	}
	/*
	 * Taking the parent's d_lock while holding the child's would
	 * invert the usual locking order, so trylock and retry from
	 * scratch on contention. NOTE(review): presumably livelock-free in
	 * practice because the holder drops the lock quickly — confirm.
	 */
	if (!spin_trylock(&ret->d_lock)) {
		spin_unlock(&dentry->d_lock);
		cpu_relax();
		goto repeat;
	}
	BUG_ON(!ret->d_count);	/* parent of a live child must be referenced */
	ret->d_count++;
	spin_unlock(&ret->d_lock);
out:
	spin_unlock(&dentry->d_lock);
	return ret;
}
EXPORT_SYMBOL(dget_parent);
/**
* d_find_alias - grab a hashed alias of inode
* @inode: inode in question
@@ -495,7 +542,7 @@ restart:
spin_lock(&dcache_lock);
list_for_each_entry(dentry, &inode->i_dentry, d_alias) {
spin_lock(&dentry->d_lock);
if (!atomic_read(&dentry->d_count)) {
if (!dentry->d_count) {
__dget_locked_dlock(dentry);
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
@@ -530,7 +577,10 @@ static void prune_one_dentry(struct dentry * dentry)
*/
while (dentry) {
spin_lock(&dcache_lock);
if (!atomic_dec_and_lock(&dentry->d_count, &dentry->d_lock)) {
spin_lock(&dentry->d_lock);
dentry->d_count--;
if (dentry->d_count) {
spin_unlock(&dentry->d_lock);
spin_unlock(&dcache_lock);
return;
}
@@ -562,7 +612,7 @@ static void shrink_dentry_list(struct list_head *list)
* the LRU because of laziness during lookup. Do not free
* it - just keep it off the LRU list.
*/
if (atomic_read(&dentry->d_count)) {
if (dentry->d_count) {
spin_unlock(&dentry->d_lock);
continue;
}
@@ -783,7 +833,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
do {
struct inode *inode;
if (atomic_read(&dentry->d_count) != 0) {
if (dentry->d_count != 0) {
printk(KERN_ERR
"BUG: Dentry %p{i=%lx,n=%s}"
" still in use (%d)"
@@ -792,7 +842,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
dentry->d_inode ?
dentry->d_inode->i_ino : 0UL,
dentry->d_name.name,
atomic_read(&dentry->d_count),
dentry->d_count,
dentry->d_sb->s_type->name,
dentry->d_sb->s_id);
BUG();
@@ -802,7 +852,9 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
parent = NULL;
else {
parent = dentry->d_parent;
atomic_dec(&parent->d_count);
spin_lock(&parent->d_lock);
parent->d_count--;
spin_unlock(&parent->d_lock);
}
list_del(&dentry->d_u.d_child);
@@ -853,7 +905,9 @@ void shrink_dcache_for_umount(struct super_block *sb)
dentry = sb->s_root;
sb->s_root = NULL;
atomic_dec(&dentry->d_count);
spin_lock(&dentry->d_lock);
dentry->d_count--;
spin_unlock(&dentry->d_lock);
shrink_dcache_for_umount_subtree(dentry);
while (!hlist_empty(&sb->s_anon)) {
@@ -950,7 +1004,7 @@ resume:
* move only zero ref count dentries to the end
* of the unused list for prune_dcache
*/
if (!atomic_read(&dentry->d_count)) {
if (!dentry->d_count) {
dentry_lru_move_tail(dentry);
found++;
} else {
@@ -1068,7 +1122,7 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
memcpy(dname, name->name, name->len);
dname[name->len] = 0;
atomic_set(&dentry->d_count, 1);
dentry->d_count = 1;
dentry->d_flags = DCACHE_UNHASHED;
spin_lock_init(&dentry->d_lock);
dentry->d_inode = NULL;
@@ -1556,7 +1610,7 @@ struct dentry * __d_lookup(struct dentry * parent, struct qstr * name)
goto next;
}
atomic_inc(&dentry->d_count);
dentry->d_count++;
found = dentry;
spin_unlock(&dentry->d_lock);
break;
@@ -1653,7 +1707,7 @@ void d_delete(struct dentry * dentry)
spin_lock(&dcache_lock);
spin_lock(&dentry->d_lock);
isdir = S_ISDIR(dentry->d_inode->i_mode);
if (atomic_read(&dentry->d_count) == 1) {
if (dentry->d_count == 1) {
dentry->d_flags &= ~DCACHE_CANT_MOUNT;
dentry_iput(dentry);
fsnotify_nameremove(dentry, isdir);
@@ -2494,11 +2548,15 @@ resume:
this_parent = dentry;
goto repeat;
}
atomic_dec(&dentry->d_count);
spin_lock(&dentry->d_lock);
dentry->d_count--;
spin_unlock(&dentry->d_lock);
}
if (this_parent != root) {
next = this_parent->d_u.d_child.next;
atomic_dec(&this_parent->d_count);
spin_lock(&this_parent->d_lock);
this_parent->d_count--;
spin_unlock(&this_parent->d_lock);
this_parent = this_parent->d_parent;
goto resume;
}