Btrfs: change reserved_extents to an atomic_t
We track delayed allocation per inode via two counters: outstanding_extents
and reserved_extents. outstanding_extents is already an atomic_t, but
reserved_extents is not and is protected by a spinlock. So convert it to an
atomic_t and, instead of taking a spinlock, use atomic_cmpxchg when releasing
delalloc bytes. This makes our inode 72 bytes smaller, and reduces locking
overhead (albeit it was minimal to begin with). Thanks,

Signed-off-by: Josef Bacik <josef@redhat.com>
commit 57a45ced94
parent 4a64001f00
@@ -136,9 +136,8 @@ struct btrfs_inode {
 	 * items we think we'll end up using, and reserved_extents is the number
 	 * of extent items we've reserved metadata for.
 	 */
-	spinlock_t accounting_lock;
 	atomic_t outstanding_extents;
-	int reserved_extents;
+	atomic_t reserved_extents;
 
 	/*
 	 * ordered_data_close is set by truncate when a file that used
@@ -3996,6 +3996,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
 	struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
 	u64 to_reserve;
 	int nr_extents;
+	int reserved_extents;
 	int ret;
 
 	if (btrfs_transaction_in_commit(root->fs_info))
@@ -4003,25 +4004,24 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
 
 	num_bytes = ALIGN(num_bytes, root->sectorsize);
 
-	spin_lock(&BTRFS_I(inode)->accounting_lock);
 	nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents) + 1;
-	if (nr_extents > BTRFS_I(inode)->reserved_extents) {
-		nr_extents -= BTRFS_I(inode)->reserved_extents;
+	reserved_extents = atomic_read(&BTRFS_I(inode)->reserved_extents);
+
+	if (nr_extents > reserved_extents) {
+		nr_extents -= reserved_extents;
 		to_reserve = calc_trans_metadata_size(root, nr_extents);
 	} else {
 		nr_extents = 0;
 		to_reserve = 0;
 	}
-	spin_unlock(&BTRFS_I(inode)->accounting_lock);
 
 	to_reserve += calc_csum_metadata_size(inode, num_bytes);
 	ret = reserve_metadata_bytes(NULL, root, block_rsv, to_reserve, 1);
 	if (ret)
 		return ret;
 
-	spin_lock(&BTRFS_I(inode)->accounting_lock);
-	BTRFS_I(inode)->reserved_extents += nr_extents;
+	atomic_add(nr_extents, &BTRFS_I(inode)->reserved_extents);
 	atomic_inc(&BTRFS_I(inode)->outstanding_extents);
-	spin_unlock(&BTRFS_I(inode)->accounting_lock);
 
 	block_rsv_add_bytes(block_rsv, to_reserve, 1);
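On the reserve side the spinlock can simply go away: the update only ever
grows reserved_extents, and an unconditional atomic add lands exactly once no
matter how racing tasks interleave, so no retry loop is needed. A minimal
userspace sketch of that idea, using C11 stdatomic in place of the kernel's
atomic_t API (the function name is illustrative, not from the patch):

	#include <stdatomic.h>

	static atomic_int reserved_extents;

	/* Reserving is a blind increment: concurrent callers may interleave
	 * freely and every addition is still applied exactly once, which is
	 * why the patch can replace lock/add/unlock with one atomic_add(). */
	static void reserve_extents(int nr_extents)
	{
		atomic_fetch_add(&reserved_extents, nr_extents);
	}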
@@ -4036,20 +4036,30 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	u64 to_free;
 	int nr_extents;
+	int reserved_extents;
 
 	num_bytes = ALIGN(num_bytes, root->sectorsize);
 	atomic_dec(&BTRFS_I(inode)->outstanding_extents);
 	WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents) < 0);
 
-	spin_lock(&BTRFS_I(inode)->accounting_lock);
-	nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents);
-	if (nr_extents < BTRFS_I(inode)->reserved_extents) {
-		nr_extents = BTRFS_I(inode)->reserved_extents - nr_extents;
-		BTRFS_I(inode)->reserved_extents -= nr_extents;
-	} else {
-		nr_extents = 0;
-	}
-	spin_unlock(&BTRFS_I(inode)->accounting_lock);
+	reserved_extents = atomic_read(&BTRFS_I(inode)->reserved_extents);
+	do {
+		int old, new;
+
+		nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents);
+		if (nr_extents >= reserved_extents) {
+			nr_extents = 0;
+			break;
+		}
+		old = reserved_extents;
+		nr_extents = reserved_extents - nr_extents;
+		new = reserved_extents - nr_extents;
+		old = atomic_cmpxchg(&BTRFS_I(inode)->reserved_extents,
+				     reserved_extents, new);
+		if (likely(old == reserved_extents))
+			break;
+
+		reserved_extents = old;
+	} while (1);
 
 	to_free = calc_csum_metadata_size(inode, num_bytes);
 	if (nr_extents > 0)
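The release path is trickier: it must lower reserved_extents to the current
outstanding_extents value, and a plain read-modify-write would lose updates
if two tasks raced. That is what the atomic_cmpxchg loop above guards
against: recompute, attempt the swap, and retry if another task changed the
counter first. A minimal userspace sketch of the same pattern, assuming C11
stdatomic rather than the kernel's atomic_t API (all names and the starting
values are illustrative):

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int outstanding_extents = 5; /* extents still in use */
	static atomic_int reserved_extents = 8;    /* extents reserved for */

	/* Drop reserved_extents down to outstanding_extents without a lock;
	 * returns how many reserved extents were released. */
	static int release_extents(void)
	{
		int old = atomic_load(&reserved_extents);

		do {
			int cur = atomic_load(&outstanding_extents);

			/* Nothing over-reserved, so nothing to give back. */
			if (cur >= old)
				return 0;

			/* Try to swap in the lower value; on failure another
			 * thread won the race, `old` is reloaded, and we
			 * retry with fresh values. */
			if (atomic_compare_exchange_weak(&reserved_extents,
							 &old, cur))
				return old - cur;
		} while (1);
	}

	int main(void)
	{
		printf("released %d, reserved now %d\n",
		       release_extents(), atomic_load(&reserved_extents));
		return 0;
	}

Like the kernel loop, the sketch rereads both counters on every pass, so a
failed compare-and-swap simply restarts the computation.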
@@ -6632,9 +6632,8 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
 	ei->index_cnt = (u64)-1;
 	ei->last_unlink_trans = 0;
 
-	spin_lock_init(&ei->accounting_lock);
 	atomic_set(&ei->outstanding_extents, 0);
-	ei->reserved_extents = 0;
+	atomic_set(&ei->reserved_extents, 0);
 
 	ei->ordered_data_close = 0;
 	ei->orphan_meta_reserved = 0;
@@ -6670,7 +6669,7 @@ void btrfs_destroy_inode(struct inode *inode)
 	WARN_ON(!list_empty(&inode->i_dentry));
 	WARN_ON(inode->i_data.nrpages);
 	WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents));
-	WARN_ON(BTRFS_I(inode)->reserved_extents);
+	WARN_ON(atomic_read(&BTRFS_I(inode)->reserved_extents));
 
 	/*
 	 * This can happen where we create an inode, but somebody else also