Merge branches 'for-linus' and 'for-linus-3.2' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs:
  Btrfs: unplug every once and a while
  Btrfs: deal with NULL srv_rsv in the delalloc inode reservation code
  Btrfs: only set cache_generation if we setup the block group
  Btrfs: don't panic if orphan item already exists
  Btrfs: fix leaked space in truncate
  Btrfs: fix how we do delalloc reservations and how we free reservations on error
  Btrfs: deal with enospc from dirtying inodes properly
  Btrfs: fix num_workers_starting bug and other bugs in async thread
  BTRFS: Establish i_ops before calling d_instantiate
  Btrfs: add a cond_resched() into the worker loop
  Btrfs: fix ctime update of on-disk inode
  btrfs: keep orphans for subvolume deletion
  Btrfs: fix inaccurate available space on raid0 profile
  Btrfs: fix wrong disk space information of the files
  Btrfs: fix wrong i_size when truncating a file to a larger size
  Btrfs: fix btrfs_end_bio to deal with write errors to a single mirror

* 'for-linus-3.2' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs:
  btrfs: lower the dirty balance poll interval
commit c9a7fe9672
@@ -64,6 +64,8 @@ struct btrfs_worker_thread {
int idle;
};

static int __btrfs_start_workers(struct btrfs_workers *workers);

/*
* btrfs_start_workers uses kthread_run, which can block waiting for memory
* for a very long time. It will actually throttle on page writeback,

@@ -88,27 +90,10 @@ static void start_new_worker_func(struct btrfs_work *work)
{
struct worker_start *start;
start = container_of(work, struct worker_start, work);
btrfs_start_workers(start->queue, 1);
__btrfs_start_workers(start->queue);
kfree(start);
}

static int start_new_worker(struct btrfs_workers *queue)
{
struct worker_start *start;
int ret;

start = kzalloc(sizeof(*start), GFP_NOFS);
if (!start)
return -ENOMEM;

start->work.func = start_new_worker_func;
start->queue = queue;
ret = btrfs_queue_worker(queue->atomic_worker_start, &start->work);
if (ret)
kfree(start);
return ret;
}

/*
* helper function to move a thread onto the idle list after it
* has finished some requests.

@@ -153,12 +138,20 @@ static void check_busy_worker(struct btrfs_worker_thread *worker)
static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
{
struct btrfs_workers *workers = worker->workers;
struct worker_start *start;
unsigned long flags;

rmb();
if (!workers->atomic_start_pending)
return;

start = kzalloc(sizeof(*start), GFP_NOFS);
if (!start)
return;

start->work.func = start_new_worker_func;
start->queue = workers;

spin_lock_irqsave(&workers->lock, flags);
if (!workers->atomic_start_pending)
goto out;

@@ -170,10 +163,11 @@ static void check_pending_worker_creates(struct btrfs_worker_thread *worker)

workers->num_workers_starting += 1;
spin_unlock_irqrestore(&workers->lock, flags);
start_new_worker(workers);
btrfs_queue_worker(workers->atomic_worker_start, &start->work);
return;

out:
kfree(start);
spin_unlock_irqrestore(&workers->lock, flags);
}

@@ -331,7 +325,7 @@ static int worker_loop(void *arg)
run_ordered_completions(worker->workers, work);

check_pending_worker_creates(worker);

cond_resched();
}

spin_lock_irq(&worker->lock);

@@ -462,56 +456,55 @@ void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
* starts new worker threads. This does not enforce the max worker
* count in case you need to temporarily go past it.
*/
static int __btrfs_start_workers(struct btrfs_workers *workers,
int num_workers)
static int __btrfs_start_workers(struct btrfs_workers *workers)
{
struct btrfs_worker_thread *worker;
int ret = 0;
int i;

for (i = 0; i < num_workers; i++) {
worker = kzalloc(sizeof(*worker), GFP_NOFS);
if (!worker) {
ret = -ENOMEM;
goto fail;
}

INIT_LIST_HEAD(&worker->pending);
INIT_LIST_HEAD(&worker->prio_pending);
INIT_LIST_HEAD(&worker->worker_list);
spin_lock_init(&worker->lock);

atomic_set(&worker->num_pending, 0);
atomic_set(&worker->refs, 1);
worker->workers = workers;
worker->task = kthread_run(worker_loop, worker,
"btrfs-%s-%d", workers->name,
workers->num_workers + i);
if (IS_ERR(worker->task)) {
ret = PTR_ERR(worker->task);
kfree(worker);
goto fail;
}
spin_lock_irq(&workers->lock);
list_add_tail(&worker->worker_list, &workers->idle_list);
worker->idle = 1;
workers->num_workers++;
workers->num_workers_starting--;
WARN_ON(workers->num_workers_starting < 0);
spin_unlock_irq(&workers->lock);
worker = kzalloc(sizeof(*worker), GFP_NOFS);
if (!worker) {
ret = -ENOMEM;
goto fail;
}

INIT_LIST_HEAD(&worker->pending);
INIT_LIST_HEAD(&worker->prio_pending);
INIT_LIST_HEAD(&worker->worker_list);
spin_lock_init(&worker->lock);

atomic_set(&worker->num_pending, 0);
atomic_set(&worker->refs, 1);
worker->workers = workers;
worker->task = kthread_run(worker_loop, worker,
"btrfs-%s-%d", workers->name,
workers->num_workers + 1);
if (IS_ERR(worker->task)) {
ret = PTR_ERR(worker->task);
kfree(worker);
goto fail;
}
spin_lock_irq(&workers->lock);
list_add_tail(&worker->worker_list, &workers->idle_list);
worker->idle = 1;
workers->num_workers++;
workers->num_workers_starting--;
WARN_ON(workers->num_workers_starting < 0);
spin_unlock_irq(&workers->lock);

return 0;
fail:
btrfs_stop_workers(workers);
spin_lock_irq(&workers->lock);
workers->num_workers_starting--;
spin_unlock_irq(&workers->lock);
return ret;
}

int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
int btrfs_start_workers(struct btrfs_workers *workers)
{
spin_lock_irq(&workers->lock);
workers->num_workers_starting += num_workers;
workers->num_workers_starting++;
spin_unlock_irq(&workers->lock);
return __btrfs_start_workers(workers, num_workers);
return __btrfs_start_workers(workers);
}

/*

@@ -568,6 +561,7 @@ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
struct btrfs_worker_thread *worker;
unsigned long flags;
struct list_head *fallback;
int ret;

again:
spin_lock_irqsave(&workers->lock, flags);

@@ -584,7 +578,9 @@ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
workers->num_workers_starting++;
spin_unlock_irqrestore(&workers->lock, flags);
/* we're below the limit, start another worker */
__btrfs_start_workers(workers, 1);
ret = __btrfs_start_workers(workers);
if (ret)
goto fallback;
goto again;
}
}

@@ -665,7 +661,7 @@ void btrfs_set_work_high_prio(struct btrfs_work *work)
/*
* places a struct btrfs_work into the pending queue of one of the kthreads
*/
int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
{
struct btrfs_worker_thread *worker;
unsigned long flags;

@@ -673,7 +669,7 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)

/* don't requeue something already on a list */
if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
goto out;
return;

worker = find_worker(workers);
if (workers->ordered) {

@@ -712,7 +708,4 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
if (wake)
wake_up_process(worker->task);
spin_unlock_irqrestore(&worker->lock, flags);

out:
return 0;
}
@@ -109,8 +109,8 @@ struct btrfs_workers {
char *name;
};

int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
int btrfs_start_workers(struct btrfs_workers *workers, int num_workers);
void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
int btrfs_start_workers(struct btrfs_workers *workers);
int btrfs_stop_workers(struct btrfs_workers *workers);
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
struct btrfs_workers *async_starter);
@@ -2692,7 +2692,8 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
int btrfs_readpage(struct file *file, struct page *page);
void btrfs_evict_inode(struct inode *inode);
int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc);
void btrfs_dirty_inode(struct inode *inode, int flags);
int btrfs_dirty_inode(struct inode *inode);
int btrfs_update_time(struct file *file);
struct inode *btrfs_alloc_inode(struct super_block *sb);
void btrfs_destroy_inode(struct inode *inode);
int btrfs_drop_inode(struct inode *inode);
@@ -640,8 +640,8 @@ static int btrfs_delayed_inode_reserve_metadata(
* Now if src_rsv == delalloc_block_rsv we'll let it just steal since
* we're accounted for.
*/
if (!trans->bytes_reserved &&
src_rsv != &root->fs_info->delalloc_block_rsv) {
if (!src_rsv || (!trans->bytes_reserved &&
src_rsv != &root->fs_info->delalloc_block_rsv)) {
ret = btrfs_block_rsv_add_noflush(root, dst_rsv, num_bytes);
/*
* Since we're under a transaction reserve_metadata_bytes could
@@ -2194,19 +2194,27 @@ struct btrfs_root *open_ctree(struct super_block *sb,
fs_info->endio_meta_write_workers.idle_thresh = 2;
fs_info->readahead_workers.idle_thresh = 2;

btrfs_start_workers(&fs_info->workers, 1);
btrfs_start_workers(&fs_info->generic_worker, 1);
btrfs_start_workers(&fs_info->submit_workers, 1);
btrfs_start_workers(&fs_info->delalloc_workers, 1);
btrfs_start_workers(&fs_info->fixup_workers, 1);
btrfs_start_workers(&fs_info->endio_workers, 1);
btrfs_start_workers(&fs_info->endio_meta_workers, 1);
btrfs_start_workers(&fs_info->endio_meta_write_workers, 1);
btrfs_start_workers(&fs_info->endio_write_workers, 1);
btrfs_start_workers(&fs_info->endio_freespace_worker, 1);
btrfs_start_workers(&fs_info->delayed_workers, 1);
btrfs_start_workers(&fs_info->caching_workers, 1);
btrfs_start_workers(&fs_info->readahead_workers, 1);
/*
* btrfs_start_workers can really only fail because of ENOMEM so just
* return -ENOMEM if any of these fail.
*/
ret = btrfs_start_workers(&fs_info->workers);
ret |= btrfs_start_workers(&fs_info->generic_worker);
ret |= btrfs_start_workers(&fs_info->submit_workers);
ret |= btrfs_start_workers(&fs_info->delalloc_workers);
ret |= btrfs_start_workers(&fs_info->fixup_workers);
ret |= btrfs_start_workers(&fs_info->endio_workers);
ret |= btrfs_start_workers(&fs_info->endio_meta_workers);
ret |= btrfs_start_workers(&fs_info->endio_meta_write_workers);
ret |= btrfs_start_workers(&fs_info->endio_write_workers);
ret |= btrfs_start_workers(&fs_info->endio_freespace_worker);
ret |= btrfs_start_workers(&fs_info->delayed_workers);
ret |= btrfs_start_workers(&fs_info->caching_workers);
ret |= btrfs_start_workers(&fs_info->readahead_workers);
if (ret) {
ret = -ENOMEM;
goto fail_sb_buffer;
}

fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super);
fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages,
@@ -2822,7 +2822,7 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group,
btrfs_release_path(path);
out:
spin_lock(&block_group->lock);
if (!ret)
if (!ret && dcs == BTRFS_DC_SETUP)
block_group->cache_generation = trans->transid;
block_group->disk_cache_state = dcs;
spin_unlock(&block_group->lock);

@@ -4204,12 +4204,17 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
u64 to_reserve = 0;
u64 csum_bytes;
unsigned nr_extents = 0;
int extra_reserve = 0;
int flush = 1;
int ret;

/* Need to be holding the i_mutex here if we aren't free space cache */
if (btrfs_is_free_space_inode(root, inode))
flush = 0;
else
WARN_ON(!mutex_is_locked(&inode->i_mutex));

if (flush && btrfs_transaction_in_commit(root->fs_info))
schedule_timeout(1);

@@ -4220,11 +4225,9 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
BTRFS_I(inode)->outstanding_extents++;

if (BTRFS_I(inode)->outstanding_extents >
BTRFS_I(inode)->reserved_extents) {
BTRFS_I(inode)->reserved_extents)
nr_extents = BTRFS_I(inode)->outstanding_extents -
BTRFS_I(inode)->reserved_extents;
BTRFS_I(inode)->reserved_extents += nr_extents;
}

/*
* Add an item to reserve for updating the inode when we complete the

@@ -4232,11 +4235,12 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
*/
if (!BTRFS_I(inode)->delalloc_meta_reserved) {
nr_extents++;
BTRFS_I(inode)->delalloc_meta_reserved = 1;
extra_reserve = 1;
}

to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
csum_bytes = BTRFS_I(inode)->csum_bytes;
spin_unlock(&BTRFS_I(inode)->lock);

ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);

@@ -4246,22 +4250,35 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)

spin_lock(&BTRFS_I(inode)->lock);
dropped = drop_outstanding_extent(inode);
to_free = calc_csum_metadata_size(inode, num_bytes, 0);
spin_unlock(&BTRFS_I(inode)->lock);
to_free += btrfs_calc_trans_metadata_size(root, dropped);

/*
* Somebody could have come in and twiddled with the
* reservation, so if we have to free more than we would have
* reserved from this reservation go ahead and release those
* bytes.
* If the inodes csum_bytes is the same as the original
* csum_bytes then we know we haven't raced with any free()ers
* so we can just reduce our inodes csum bytes and carry on.
* Otherwise we have to do the normal free thing to account for
* the case that the free side didn't free up its reserve
* because of this outstanding reservation.
*/
to_free -= to_reserve;
if (BTRFS_I(inode)->csum_bytes == csum_bytes)
calc_csum_metadata_size(inode, num_bytes, 0);
else
to_free = calc_csum_metadata_size(inode, num_bytes, 0);
spin_unlock(&BTRFS_I(inode)->lock);
if (dropped)
to_free += btrfs_calc_trans_metadata_size(root, dropped);

if (to_free)
btrfs_block_rsv_release(root, block_rsv, to_free);
return ret;
}

spin_lock(&BTRFS_I(inode)->lock);
if (extra_reserve) {
BTRFS_I(inode)->delalloc_meta_reserved = 1;
nr_extents--;
}
BTRFS_I(inode)->reserved_extents += nr_extents;
spin_unlock(&BTRFS_I(inode)->lock);

block_rsv_add_bytes(block_rsv, to_reserve, 1);

return 0;
@@ -1167,6 +1167,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
(sizeof(struct page *)));
nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
nrptrs = max(nrptrs, 8);
pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
if (!pages)
return -ENOMEM;

@@ -1387,7 +1389,11 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
goto out;
}

file_update_time(file);
err = btrfs_update_time(file);
if (err) {
mutex_unlock(&inode->i_mutex);
goto out;
}
BTRFS_I(inode)->sequence++;

start_pos = round_down(pos, root->sectorsize);
fs/btrfs/inode.c

@@ -38,6 +38,7 @@
#include <linux/falloc.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/mount.h>
#include "compat.h"
#include "ctree.h"
#include "disk-io.h"

@@ -2031,7 +2032,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
/* insert an orphan item to track this unlinked/truncated file */
if (insert >= 1) {
ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
BUG_ON(ret);
BUG_ON(ret && ret != -EEXIST);
}

/* insert an orphan item to track subvolume contains orphan files */

@@ -2158,6 +2159,38 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
if (ret && ret != -ESTALE)
goto out;

if (ret == -ESTALE && root == root->fs_info->tree_root) {
struct btrfs_root *dead_root;
struct btrfs_fs_info *fs_info = root->fs_info;
int is_dead_root = 0;

/*
* this is an orphan in the tree root. Currently these
* could come from 2 sources:
* a) a snapshot deletion in progress
* b) a free space cache inode
* We need to distinguish those two, as the snapshot
* orphan must not get deleted.
* find_dead_roots already ran before us, so if this
* is a snapshot deletion, we should find the root
* in the dead_roots list
*/
spin_lock(&fs_info->trans_lock);
list_for_each_entry(dead_root, &fs_info->dead_roots,
root_list) {
if (dead_root->root_key.objectid ==
found_key.objectid) {
is_dead_root = 1;
break;
}
}
spin_unlock(&fs_info->trans_lock);
if (is_dead_root) {
/* prevent this orphan from being found again */
key.offset = found_key.objectid - 1;
continue;
}
}
/*
* Inode is already gone but the orphan item is still there,
* kill the orphan item.

@@ -2191,7 +2224,14 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
continue;
}
nr_truncate++;
/*
* Need to hold the imutex for reservation purposes, not
* a huge deal here but I have a WARN_ON in
* btrfs_delalloc_reserve_space to catch offenders.
*/
mutex_lock(&inode->i_mutex);
ret = btrfs_truncate(inode);
mutex_unlock(&inode->i_mutex);
} else {
nr_unlink++;
}

@@ -3327,7 +3367,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
u64 hint_byte = 0;
hole_size = last_byte - cur_offset;

trans = btrfs_start_transaction(root, 2);
trans = btrfs_start_transaction(root, 3);
if (IS_ERR(trans)) {
err = PTR_ERR(trans);
break;

@@ -3337,6 +3377,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
cur_offset + hole_size,
&hint_byte, 1);
if (err) {
btrfs_update_inode(trans, root, inode);
btrfs_end_transaction(trans, root);
break;
}

@@ -3346,6 +3387,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
0, hole_size, 0, hole_size,
0, 0, 0);
if (err) {
btrfs_update_inode(trans, root, inode);
btrfs_end_transaction(trans, root);
break;
}

@@ -3353,6 +3395,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)
btrfs_drop_extent_cache(inode, hole_start,
last_byte - 1, 0);

btrfs_update_inode(trans, root, inode);
btrfs_end_transaction(trans, root);
}
free_extent_map(em);

@@ -3370,6 +3413,8 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size)

static int btrfs_setsize(struct inode *inode, loff_t newsize)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
loff_t oldsize = i_size_read(inode);
int ret;

@@ -3377,16 +3422,19 @@ static int btrfs_setsize(struct inode *inode, loff_t newsize)
return 0;

if (newsize > oldsize) {
i_size_write(inode, newsize);
btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
truncate_pagecache(inode, oldsize, newsize);
ret = btrfs_cont_expand(inode, oldsize, newsize);
if (ret) {
btrfs_setsize(inode, oldsize);
if (ret)
return ret;
}

mark_inode_dirty(inode);
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans))
return PTR_ERR(trans);

i_size_write(inode, newsize);
btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
ret = btrfs_update_inode(trans, root, inode);
btrfs_end_transaction_throttle(trans, root);
} else {

/*

@@ -3426,9 +3474,9 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)

if (attr->ia_valid) {
setattr_copy(inode, attr);
mark_inode_dirty(inode);
err = btrfs_dirty_inode(inode);

if (attr->ia_valid & ATTR_MODE)
if (!err && attr->ia_valid & ATTR_MODE)
err = btrfs_acl_chmod(inode);
}

@@ -4204,42 +4252,80 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
* FIXME, needs more benchmarking...there are no reasons other than performance
* to keep or drop this code.
*/
void btrfs_dirty_inode(struct inode *inode, int flags)
int btrfs_dirty_inode(struct inode *inode)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_trans_handle *trans;
int ret;

if (BTRFS_I(inode)->dummy_inode)
return;
return 0;

trans = btrfs_join_transaction(root);
BUG_ON(IS_ERR(trans));
if (IS_ERR(trans))
return PTR_ERR(trans);

ret = btrfs_update_inode(trans, root, inode);
if (ret && ret == -ENOSPC) {
/* whoops, lets try again with the full transaction */
btrfs_end_transaction(trans, root);
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
printk_ratelimited(KERN_ERR "btrfs: fail to "
"dirty inode %llu error %ld\n",
(unsigned long long)btrfs_ino(inode),
PTR_ERR(trans));
return;
}
if (IS_ERR(trans))
return PTR_ERR(trans);

ret = btrfs_update_inode(trans, root, inode);
if (ret) {
printk_ratelimited(KERN_ERR "btrfs: fail to "
"dirty inode %llu error %d\n",
(unsigned long long)btrfs_ino(inode),
ret);
}
}
btrfs_end_transaction(trans, root);
if (BTRFS_I(inode)->delayed_node)
btrfs_balance_delayed_items(root);

return ret;
}

/*
* This is a copy of file_update_time. We need this so we can return error on
* ENOSPC for updating the inode in the case of file write and mmap writes.
*/
int btrfs_update_time(struct file *file)
{
struct inode *inode = file->f_path.dentry->d_inode;
struct timespec now;
int ret;
enum { S_MTIME = 1, S_CTIME = 2, S_VERSION = 4 } sync_it = 0;

/* First try to exhaust all avenues to not sync */
if (IS_NOCMTIME(inode))
return 0;

now = current_fs_time(inode->i_sb);
if (!timespec_equal(&inode->i_mtime, &now))
sync_it = S_MTIME;

if (!timespec_equal(&inode->i_ctime, &now))
sync_it |= S_CTIME;

if (IS_I_VERSION(inode))
sync_it |= S_VERSION;

if (!sync_it)
return 0;

/* Finally allowed to write? Takes lock. */
if (mnt_want_write_file(file))
return 0;

/* Only change inode inside the lock region */
if (sync_it & S_VERSION)
inode_inc_iversion(inode);
if (sync_it & S_CTIME)
inode->i_ctime = now;
if (sync_it & S_MTIME)
inode->i_mtime = now;
ret = btrfs_dirty_inode(inode);
if (!ret)
mark_inode_dirty_sync(inode);
mnt_drop_write(file->f_path.mnt);
return ret;
}

/*

@@ -4555,11 +4641,18 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
goto out_unlock;
}

/*
* If the active LSM wants to access the inode during
* d_instantiate it needs these. Smack checks to see
* if the filesystem supports xattrs by looking at the
* ops vector.
*/

inode->i_op = &btrfs_special_inode_operations;
err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
if (err)
drop_inode = 1;
else {
inode->i_op = &btrfs_special_inode_operations;
init_special_inode(inode, inode->i_mode, rdev);
btrfs_update_inode(trans, root, inode);
}

@@ -4613,14 +4706,21 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
goto out_unlock;
}

/*
* If the active LSM wants to access the inode during
* d_instantiate it needs these. Smack checks to see
* if the filesystem supports xattrs by looking at the
* ops vector.
*/
inode->i_fop = &btrfs_file_operations;
inode->i_op = &btrfs_file_inode_operations;

err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
if (err)
drop_inode = 1;
else {
inode->i_mapping->a_ops = &btrfs_aops;
inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
inode->i_fop = &btrfs_file_operations;
inode->i_op = &btrfs_file_inode_operations;
BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
}
out_unlock:

@@ -6303,7 +6403,12 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
u64 page_start;
u64 page_end;

/* Need this to keep space reservations serialized */
mutex_lock(&inode->i_mutex);
ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
mutex_unlock(&inode->i_mutex);
if (!ret)
ret = btrfs_update_time(vma->vm_file);
if (ret) {
if (ret == -ENOMEM)
ret = VM_FAULT_OOM;

@@ -6515,8 +6620,9 @@ static int btrfs_truncate(struct inode *inode)
/* Just need the 1 for updating the inode */
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
err = PTR_ERR(trans);
goto out;
ret = err = PTR_ERR(trans);
trans = NULL;
break;
}
}

@@ -7076,14 +7182,21 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry,
goto out_unlock;
}

/*
* If the active LSM wants to access the inode during
* d_instantiate it needs these. Smack checks to see
* if the filesystem supports xattrs by looking at the
* ops vector.
*/
inode->i_fop = &btrfs_file_operations;
inode->i_op = &btrfs_file_inode_operations;

err = btrfs_add_nondir(trans, dir, dentry, inode, 0, index);
if (err)
drop_inode = 1;
else {
inode->i_mapping->a_ops = &btrfs_aops;
inode->i_mapping->backing_dev_info = &root->fs_info->bdi;
inode->i_fop = &btrfs_file_operations;
inode->i_op = &btrfs_file_inode_operations;
BTRFS_I(inode)->io_tree.ops = &btrfs_extent_io_ops;
}
if (drop_inode)

@@ -7353,6 +7466,7 @@ static const struct inode_operations btrfs_symlink_inode_operations = {
.follow_link = page_follow_link_light,
.put_link = page_put_link,
.getattr = btrfs_getattr,
.setattr = btrfs_setattr,
.permission = btrfs_permission,
.setxattr = btrfs_setxattr,
.getxattr = btrfs_getxattr,
@@ -252,11 +252,11 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
trans = btrfs_join_transaction(root);
BUG_ON(IS_ERR(trans));

btrfs_update_iflags(inode);
inode->i_ctime = CURRENT_TIME;
ret = btrfs_update_inode(trans, root, inode);
BUG_ON(ret);

btrfs_update_iflags(inode);
inode->i_ctime = CURRENT_TIME;
btrfs_end_transaction(trans, root);

mnt_drop_write(file->f_path.mnt);

@@ -858,8 +858,10 @@ static int cluster_pages_for_defrag(struct inode *inode,
return 0;
file_end = (isize - 1) >> PAGE_CACHE_SHIFT;

mutex_lock(&inode->i_mutex);
ret = btrfs_delalloc_reserve_space(inode,
num_pages << PAGE_CACHE_SHIFT);
mutex_unlock(&inode->i_mutex);
if (ret)
return ret;
again:
@@ -2947,7 +2947,9 @@ static int relocate_file_extent_cluster(struct inode *inode,
index = (cluster->start - offset) >> PAGE_CACHE_SHIFT;
last_index = (cluster->end - offset) >> PAGE_CACHE_SHIFT;
while (index <= last_index) {
mutex_lock(&inode->i_mutex);
ret = btrfs_delalloc_reserve_metadata(inode, PAGE_CACHE_SIZE);
mutex_unlock(&inode->i_mutex);
if (ret)
goto out;
@@ -1535,18 +1535,22 @@ static noinline_for_stack int scrub_supers(struct scrub_dev *sdev)
static noinline_for_stack int scrub_workers_get(struct btrfs_root *root)
{
struct btrfs_fs_info *fs_info = root->fs_info;
int ret = 0;

mutex_lock(&fs_info->scrub_lock);
if (fs_info->scrub_workers_refcnt == 0) {
btrfs_init_workers(&fs_info->scrub_workers, "scrub",
fs_info->thread_pool_size, &fs_info->generic_worker);
fs_info->scrub_workers.idle_thresh = 4;
btrfs_start_workers(&fs_info->scrub_workers, 1);
ret = btrfs_start_workers(&fs_info->scrub_workers);
if (ret)
goto out;
}
++fs_info->scrub_workers_refcnt;
out:
mutex_unlock(&fs_info->scrub_lock);

return 0;
return ret;
}

static noinline_for_stack void scrub_workers_put(struct btrfs_root *root)
@@ -41,6 +41,7 @@
#include <linux/slab.h>
#include <linux/cleancache.h>
#include <linux/mnt_namespace.h>
#include <linux/ratelimit.h>
#include "compat.h"
#include "delayed-inode.h"
#include "ctree.h"

@@ -1053,7 +1054,7 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
u64 avail_space;
u64 used_space;
u64 min_stripe_size;
int min_stripes = 1;
int min_stripes = 1, num_stripes = 1;
int i = 0, nr_devices;
int ret;

@@ -1067,12 +1068,16 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)

/* calc min stripe number for data space alloction */
type = btrfs_get_alloc_profile(root, 1);
if (type & BTRFS_BLOCK_GROUP_RAID0)
if (type & BTRFS_BLOCK_GROUP_RAID0) {
min_stripes = 2;
else if (type & BTRFS_BLOCK_GROUP_RAID1)
num_stripes = nr_devices;
} else if (type & BTRFS_BLOCK_GROUP_RAID1) {
min_stripes = 2;
else if (type & BTRFS_BLOCK_GROUP_RAID10)
num_stripes = 2;
} else if (type & BTRFS_BLOCK_GROUP_RAID10) {
min_stripes = 4;
num_stripes = 4;
}

if (type & BTRFS_BLOCK_GROUP_DUP)
min_stripe_size = 2 * BTRFS_STRIPE_LEN;

@@ -1141,13 +1146,16 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
i = nr_devices - 1;
avail_space = 0;
while (nr_devices >= min_stripes) {
if (num_stripes > nr_devices)
num_stripes = nr_devices;

if (devices_info[i].max_avail >= min_stripe_size) {
int j;
u64 alloc_size;

avail_space += devices_info[i].max_avail * min_stripes;
avail_space += devices_info[i].max_avail * num_stripes;
alloc_size = devices_info[i].max_avail;
for (j = i + 1 - min_stripes; j <= i; j++)
for (j = i + 1 - num_stripes; j <= i; j++)
devices_info[j].max_avail -= alloc_size;
}
i--;

@@ -1264,6 +1272,16 @@ static int btrfs_unfreeze(struct super_block *sb)
return 0;
}

static void btrfs_fs_dirty_inode(struct inode *inode, int flags)
{
int ret;

ret = btrfs_dirty_inode(inode);
if (ret)
printk_ratelimited(KERN_ERR "btrfs: fail to dirty inode %Lu "
"error %d\n", btrfs_ino(inode), ret);
}

static const struct super_operations btrfs_super_ops = {
.drop_inode = btrfs_drop_inode,
.evict_inode = btrfs_evict_inode,

@@ -1271,7 +1289,7 @@ static const struct super_operations btrfs_super_ops = {
.sync_fs = btrfs_sync_fs,
.show_options = btrfs_show_options,
.write_inode = btrfs_write_inode,
.dirty_inode = btrfs_dirty_inode,
.dirty_inode = btrfs_fs_dirty_inode,
.alloc_inode = btrfs_alloc_inode,
.destroy_inode = btrfs_destroy_inode,
.statfs = btrfs_statfs,
@@ -295,6 +295,12 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
btrfs_requeue_work(&device->work);
goto done;
}
/* unplug every 64 requests just for good measure */
if (batch_run % 64 == 0) {
blk_finish_plug(&plug);
blk_start_plug(&plug);
sync_pending = 0;
}
}

cond_resched();

@@ -3258,7 +3264,7 @@ static void btrfs_end_bio(struct bio *bio, int err)
*/
if (atomic_read(&bbio->error) > bbio->max_errors) {
err = -EIO;
} else if (err) {
} else {
/*
* this bio is actually up to date, we didn't
* go over the max number of errors