Btrfs: traverse and flush the delalloc inodes once
btrfs_start_delalloc_inodes() does not need to traverse and flush the delalloc inodes repeatedly: any data the user writes after the delalloc flush has started can be treated as data written after the flush completed, so it can simply be flushed on the next pass.

Signed-off-by: Miao Xie <miaox@cn.fujitsu.com>
Signed-off-by: Josef Bacik <jbacik@fusionio.com>
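The argument above boils down to a "splice once, then flush" pattern: detach everything that is pending right now onto a private list and process only that, instead of looping until the shared list drains. Below is a minimal user-space sketch of that idea, not the kernel implementation; the struct pending list and the queue_item()/flush_one()/start_delalloc_flush() helpers are hypothetical stand-ins for the kernel's list_head and per-inode flush work, and the locking the real code takes (delalloc_lock) is omitted for brevity.

	/* Sketch only: items queued while a flush is running are, by
	 * definition, "post-flush" data and are left for the next call,
	 * which is why a single pass is enough. */
	#include <stdio.h>
	#include <stdlib.h>

	struct pending {
		int id;
		struct pending *next;
	};

	static struct pending *delalloc_list;	/* shared "pending delalloc" list */

	static void queue_item(int id)
	{
		struct pending *p = malloc(sizeof(*p));
		p->id = id;
		p->next = delalloc_list;
		delalloc_list = p;
	}

	/* hypothetical per-item flush */
	static void flush_one(struct pending *p)
	{
		printf("flushing inode %d\n", p->id);
	}

	/* Single-pass flush: detach the whole list (like list_splice_init()),
	 * then work only on the private copy. */
	static void start_delalloc_flush(void)
	{
		struct pending *splice = delalloc_list;
		delalloc_list = NULL;

		while (splice) {
			struct pending *p = splice;
			splice = splice->next;
			flush_one(p);
			free(p);
		}
	}

	int main(void)
	{
		queue_item(1);
		queue_item(2);
		start_delalloc_flush();	/* flushes 1 and 2 only */
		queue_item(3);		/* "written after the flush started" */
		start_delalloc_flush();	/* flushed on the next pass */
		return 0;
	}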
This commit is contained in:
parent eebc608406
commit 63607cc86a
@@ -7614,7 +7614,7 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
 
 	INIT_LIST_HEAD(&works);
 	INIT_LIST_HEAD(&splice);
-again:
+
 	spin_lock(&root->fs_info->delalloc_lock);
 	list_splice_init(&root->fs_info->delalloc_inodes, &splice);
 	while (!list_empty(&splice)) {
@@ -7650,13 +7650,6 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput)
 		btrfs_wait_and_free_delalloc_work(work);
 	}
 
-	spin_lock(&root->fs_info->delalloc_lock);
-	if (!list_empty(&root->fs_info->delalloc_inodes)) {
-		spin_unlock(&root->fs_info->delalloc_lock);
-		goto again;
-	}
-	spin_unlock(&root->fs_info->delalloc_lock);
-
 	/* the filemap_flush will queue IO into the worker threads, but
 	 * we have to make sure the IO is actually started and that
 	 * ordered extents get created before we return