btrfs: free delayed node outside of root->inode_lock

On heavy workloads, we're seeing soft lockup warnings on
root->inode_lock in __btrfs_release_delayed_node. The low-hanging fruit
is to reduce the size of the critical section: neither kmem_cache_free()
call needs to run with the lock held.

Signed-off-by: Jeff Mahoney <jeffm@suse.com>
Reviewed-by: David Sterba <dsterba@suse.cz>
Signed-off-by: Chris Mason <clm@fb.com>
commit 964930312a
parent 902c68a4da
Author: Jeff Mahoney <jeffm@suse.com>
Date:   2014-05-27 13:53:20 -04:00
Committer: Chris Mason <clm@fb.com>

@@ -149,8 +149,8 @@ static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node(
 	spin_lock(&root->inode_lock);
 	ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node);
 	if (ret == -EEXIST) {
-		kmem_cache_free(delayed_node_cache, node);
 		spin_unlock(&root->inode_lock);
+		kmem_cache_free(delayed_node_cache, node);
 		radix_tree_preload_end();
 		goto again;
 	}
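
In this hunk the insert lost the race, so the node being freed was never
visible in delayed_nodes_tree; no other thread can hold a reference to it,
and the kmem_cache_free() can safely wait until after the lock is dropped.
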
@@ -267,14 +267,17 @@ static void __btrfs_release_delayed_node(
 	mutex_unlock(&delayed_node->mutex);
 
 	if (atomic_dec_and_test(&delayed_node->refs)) {
+		bool free = false;
 		struct btrfs_root *root = delayed_node->root;
 		spin_lock(&root->inode_lock);
 		if (atomic_read(&delayed_node->refs) == 0) {
 			radix_tree_delete(&root->delayed_nodes_tree,
					  delayed_node->inode_id);
-			kmem_cache_free(delayed_node_cache, delayed_node);
+			free = true;
 		}
 		spin_unlock(&root->inode_lock);
+		if (free)
+			kmem_cache_free(delayed_node_cache, delayed_node);
 	}
 }
 
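
Both hunks apply the same pattern: unlink or mark under the lock, then do
the actual free after unlock. The sketch below is not part of the commit;
it is a minimal, self-contained userspace analogue of the release path,
with a pthread mutex standing in for root->inode_lock and a linked list
standing in for the radix tree. All names (release_node, tree_lock, and so
on) are illustrative.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct node {
	struct node *next;
	atomic_int refs;
};

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *tree_head;	/* stand-in for delayed_nodes_tree */

static void release_node(struct node *node)
{
	/* Drop our reference; the last one out cleans up. */
	if (atomic_fetch_sub(&node->refs, 1) == 1) {
		bool free_it = false;

		pthread_mutex_lock(&tree_lock);
		/*
		 * Re-check under the lock: a concurrent lookup may have
		 * taken a new reference after the decrement above.
		 */
		if (atomic_load(&node->refs) == 0) {
			struct node **p = &tree_head;

			/* Unlink from the tree while the lock is held. */
			while (*p && *p != node)
				p = &(*p)->next;
			if (*p)
				*p = node->next;
			free_it = true;
		}
		pthread_mutex_unlock(&tree_lock);

		/* The free itself needs no lock; keep it outside. */
		if (free_it)
			free(node);
	}
}

As in __btrfs_release_delayed_node(), the refcount is dropped atomically
before the lock is taken and re-checked under it, so a lookup that revives
the node in the window between the decrement and the lock acquisition keeps
it alive; only a confirmed-dead node is freed, and only after unlock.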