btrfs: migrate the block group lookup code
Move these bits first as they are the easiest to move. Export two of the
helpers so they can be moved all at once.

Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: David Sterba <dsterba@suse.com>
[ minor style updates ]
Signed-off-by: David Sterba <dsterba@suse.com>
commit 2e405ad842
parent aac0023c21
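The two helpers exported here, btrfs_lookup_first_block_group() and
btrfs_next_block_group(), support a simple caller-side iteration idiom over
all block groups. A minimal sketch of that idiom, mirroring the
btrfs_trim_fs() call site updated at the end of this diff (`start` and
`range_end` are placeholders for whatever range the caller cares about):

	/*
	 * Caller-side sketch only, not part of the patch.  Each call to
	 * btrfs_next_block_group() drops the reference on the group it is
	 * handed and returns the next group with a new reference held, so
	 * only the group we break out with needs an explicit put.
	 */
	struct btrfs_block_group_cache *cache;

	cache = btrfs_lookup_first_block_group(fs_info, start);
	for (; cache; cache = btrfs_next_block_group(cache)) {
		if (cache->key.objectid >= range_end) {
			btrfs_put_block_group(cache);
			break;
		}
		/* ... operate on this block group ... */
	}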
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
@@ -11,7 +11,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
 	   compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
 	   reada.o backref.o ulist.o qgroup.o send.o dev-replace.o raid56.o \
 	   uuid-tree.o props.o free-space-tree.o tree-checker.o space-info.o \
-	   block-rsv.o delalloc-space.o
+	   block-rsv.o delalloc-space.o block-group.o
 
 btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
 btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o
diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c
new file mode 100644
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include "ctree.h"
+#include "block-group.h"
+
+/*
+ * This will return the block group at or after bytenr if contains is 0, else
+ * it will return the block group that contains the bytenr
+ */
+static struct btrfs_block_group_cache *block_group_cache_tree_search(
+		struct btrfs_fs_info *info, u64 bytenr, int contains)
+{
+	struct btrfs_block_group_cache *cache, *ret = NULL;
+	struct rb_node *n;
+	u64 end, start;
+
+	spin_lock(&info->block_group_cache_lock);
+	n = info->block_group_cache_tree.rb_node;
+
+	while (n) {
+		cache = rb_entry(n, struct btrfs_block_group_cache,
+				 cache_node);
+		end = cache->key.objectid + cache->key.offset - 1;
+		start = cache->key.objectid;
+
+		if (bytenr < start) {
+			if (!contains && (!ret || start < ret->key.objectid))
+				ret = cache;
+			n = n->rb_left;
+		} else if (bytenr > start) {
+			if (contains && bytenr <= end) {
+				ret = cache;
+				break;
+			}
+			n = n->rb_right;
+		} else {
+			ret = cache;
+			break;
+		}
+	}
+	if (ret) {
+		btrfs_get_block_group(ret);
+		if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
+			info->first_logical_byte = ret->key.objectid;
+	}
+	spin_unlock(&info->block_group_cache_lock);
+
+	return ret;
+}
+
+/*
+ * Return the block group that starts at or after bytenr
+ */
+struct btrfs_block_group_cache *btrfs_lookup_first_block_group(
+		struct btrfs_fs_info *info, u64 bytenr)
+{
+	return block_group_cache_tree_search(info, bytenr, 0);
+}
+
+/*
+ * Return the block group that contains the given bytenr
+ */
+struct btrfs_block_group_cache *btrfs_lookup_block_group(
+		struct btrfs_fs_info *info, u64 bytenr)
+{
+	return block_group_cache_tree_search(info, bytenr, 1);
+}
+
+struct btrfs_block_group_cache *btrfs_next_block_group(
+		struct btrfs_block_group_cache *cache)
+{
+	struct btrfs_fs_info *fs_info = cache->fs_info;
+	struct rb_node *node;
+
+	spin_lock(&fs_info->block_group_cache_lock);
+
+	/* If our block group was removed, we need a full search. */
+	if (RB_EMPTY_NODE(&cache->cache_node)) {
+		const u64 next_bytenr = cache->key.objectid + cache->key.offset;
+
+		spin_unlock(&fs_info->block_group_cache_lock);
+		btrfs_put_block_group(cache);
+		cache = btrfs_lookup_first_block_group(fs_info, next_bytenr); return cache;
+	}
+	node = rb_next(&cache->cache_node);
+	btrfs_put_block_group(cache);
+	if (node) {
+		cache = rb_entry(node, struct btrfs_block_group_cache,
+				 cache_node);
+		btrfs_get_block_group(cache);
+	} else
+		cache = NULL;
+	spin_unlock(&fs_info->block_group_cache_lock);
+	return cache;
+}
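The two public lookups above differ only in the `contains` flag they pass to
the tree search. A toy user-space illustration of the two modes (not kernel
code: a sorted array and the hypothetical names `group`, `groups` and
`lookup` stand in for the kernel rbtree machinery):

/* Toy illustration of the two modes of block_group_cache_tree_search().
 * Each "group" covers the byte range [start, start + len - 1]. */
#include <stdio.h>
#include <stddef.h>

struct group { unsigned long long start, len; };

/* Three groups with a hole at [8, 15]. */
static const struct group groups[] = {
	{ 0, 8 }, { 16, 8 }, { 32, 8 },
};

/*
 * contains == 0: first group starting at or after bytenr
 * (btrfs_lookup_first_block_group() semantics);
 * contains == 1: the group whose range covers bytenr, if any
 * (btrfs_lookup_block_group() semantics).
 */
static const struct group *lookup(unsigned long long bytenr, int contains)
{
	size_t i;

	for (i = 0; i < sizeof(groups) / sizeof(groups[0]); i++) {
		unsigned long long end = groups[i].start + groups[i].len - 1;

		if (contains) {
			if (bytenr >= groups[i].start && bytenr <= end)
				return &groups[i];
		} else if (groups[i].start >= bytenr) {
			return &groups[i];
		}
	}
	return NULL;
}

int main(void)
{
	/* bytenr 10 falls in the hole: no group contains it ... */
	printf("contains(10): %s\n", lookup(10, 1) ? "found" : "NULL");
	/* ... but the at-or-after mode finds the group starting at 16. */
	printf("at-or-after(10): start=%llu\n", lookup(10, 0)->start);
	/* bytenr 33 is inside the last group. */
	printf("contains(33): start=%llu\n", lookup(33, 1)->start);
	return 0;
}

The hole case is the interesting one: a contains-lookup of bytenr 10 returns
NULL, while the at-or-after mode skips ahead to the next group.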
diff --git a/fs/btrfs/block-group.h b/fs/btrfs/block-group.h
@@ -151,4 +151,11 @@ static inline int btrfs_should_fragment_free_space(
 }
 #endif
 
+struct btrfs_block_group_cache *btrfs_lookup_first_block_group(
+		struct btrfs_fs_info *info, u64 bytenr);
+struct btrfs_block_group_cache *btrfs_lookup_block_group(
+		struct btrfs_fs_info *info, u64 bytenr);
+struct btrfs_block_group_cache *btrfs_next_block_group(
+		struct btrfs_block_group_cache *cache);
+
 #endif /* BTRFS_BLOCK_GROUP_H */
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
@@ -2496,9 +2496,6 @@ int btrfs_pin_extent_for_log_replay(struct btrfs_fs_info *fs_info,
 int btrfs_exclude_logged_extents(struct extent_buffer *eb);
 int btrfs_cross_ref_exist(struct btrfs_root *root,
 			  u64 objectid, u64 offset, u64 bytenr);
-struct btrfs_block_group_cache *btrfs_lookup_block_group(
-						 struct btrfs_fs_info *info,
-						 u64 bytenr);
 void btrfs_get_block_group(struct btrfs_block_group_cache *cache);
 void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
@@ -133,52 +133,6 @@ static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
 	return 0;
 }
 
-/*
- * This will return the block group at or after bytenr if contains is 0, else
- * it will return the block group that contains the bytenr
- */
-static struct btrfs_block_group_cache *
-block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
-			      int contains)
-{
-	struct btrfs_block_group_cache *cache, *ret = NULL;
-	struct rb_node *n;
-	u64 end, start;
-
-	spin_lock(&info->block_group_cache_lock);
-	n = info->block_group_cache_tree.rb_node;
-
-	while (n) {
-		cache = rb_entry(n, struct btrfs_block_group_cache,
-				 cache_node);
-		end = cache->key.objectid + cache->key.offset - 1;
-		start = cache->key.objectid;
-
-		if (bytenr < start) {
-			if (!contains && (!ret || start < ret->key.objectid))
-				ret = cache;
-			n = n->rb_left;
-		} else if (bytenr > start) {
-			if (contains && bytenr <= end) {
-				ret = cache;
-				break;
-			}
-			n = n->rb_right;
-		} else {
-			ret = cache;
-			break;
-		}
-	}
-	if (ret) {
-		btrfs_get_block_group(ret);
-		if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
-			info->first_logical_byte = ret->key.objectid;
-	}
-	spin_unlock(&info->block_group_cache_lock);
-
-	return ret;
-}
-
 static int add_excluded_extent(struct btrfs_fs_info *fs_info,
 			       u64 start, u64 num_bytes)
 {
@@ -673,24 +627,6 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 	return ret;
 }
 
-/*
- * return the block group that starts at or after bytenr
- */
-static struct btrfs_block_group_cache *
-btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
-{
-	return block_group_cache_tree_search(info, bytenr, 0);
-}
-
-/*
- * return the block group that contains the given bytenr
- */
-struct btrfs_block_group_cache *btrfs_lookup_block_group(
-						 struct btrfs_fs_info *info,
-						 u64 bytenr)
-{
-	return block_group_cache_tree_search(info, bytenr, 1);
-}
-
 static u64 generic_ref_to_space_flags(struct btrfs_ref *ref)
 {
@@ -3146,34 +3082,6 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,
 
 }
 
-static struct btrfs_block_group_cache *next_block_group(
-		struct btrfs_block_group_cache *cache)
-{
-	struct btrfs_fs_info *fs_info = cache->fs_info;
-	struct rb_node *node;
-
-	spin_lock(&fs_info->block_group_cache_lock);
-
-	/* If our block group was removed, we need a full search. */
-	if (RB_EMPTY_NODE(&cache->cache_node)) {
-		const u64 next_bytenr = cache->key.objectid + cache->key.offset;
-
-		spin_unlock(&fs_info->block_group_cache_lock);
-		btrfs_put_block_group(cache);
-		cache = btrfs_lookup_first_block_group(fs_info, next_bytenr); return cache;
-	}
-	node = rb_next(&cache->cache_node);
-	btrfs_put_block_group(cache);
-	if (node) {
-		cache = rb_entry(node, struct btrfs_block_group_cache,
-				 cache_node);
-		btrfs_get_block_group(cache);
-	} else
-		cache = NULL;
-	spin_unlock(&fs_info->block_group_cache_lock);
-	return cache;
-}
-
 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
 			    struct btrfs_trans_handle *trans,
 			    struct btrfs_path *path)
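Worth noting for the two call-site hunks below: next_block_group() gains the
btrfs_ prefix as it becomes a public helper, following the usual convention
for exported btrfs functions, so the callers change even though the moved
code is identical apart from the style tweaks mentioned in the commit
message.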
@@ -7651,7 +7559,7 @@ void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
 		if (block_group->iref)
 			break;
 		spin_unlock(&block_group->lock);
-		block_group = next_block_group(block_group);
+		block_group = btrfs_next_block_group(block_group);
 	}
 	if (!block_group) {
 		if (last == 0)
@@ -8872,7 +8780,7 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
 		return -EINVAL;
 
 	cache = btrfs_lookup_first_block_group(fs_info, range->start);
-	for (; cache; cache = next_block_group(cache)) {
+	for (; cache; cache = btrfs_next_block_group(cache)) {
 		if (cache->key.objectid >= range_end) {
 			btrfs_put_block_group(cache);
 			break;