GFS2: Optimise writepage for metadata
This adds a GFS2-specific writepage for metadata, rather than continuing to use the generic VFS function. As a result we now tag all our metadata I/O with the correct flag, so blktraces will be less confusing. Also, the generic function checks for a number of corner cases which cannot happen on the metadata address spaces, so this should be faster too.

Signed-off-by: Steven Whitehouse <swhiteho@redhat.com>
commit 4a0f9a321a
parent c969f58ca4
@@ -33,17 +33,65 @@
 #include "util.h"
 #include "ops_address.h"
 
-static int aspace_get_block(struct inode *inode, sector_t lblock,
-			    struct buffer_head *bh_result, int create)
+static int gfs2_aspace_writepage(struct page *page, struct writeback_control *wbc)
 {
-	gfs2_assert_warn(inode->i_sb->s_fs_info, 0);
-	return -EOPNOTSUPP;
-}
+	int err;
+	struct buffer_head *bh, *head;
+	int nr_underway = 0;
+	int write_op = (1 << BIO_RW_META) | ((wbc->sync_mode == WB_SYNC_ALL ?
+			WRITE_SYNC_PLUG : WRITE));
 
-static int gfs2_aspace_writepage(struct page *page,
-				 struct writeback_control *wbc)
-{
-	return block_write_full_page(page, aspace_get_block, wbc);
+	BUG_ON(!PageLocked(page));
+	BUG_ON(!page_has_buffers(page));
+
+	head = page_buffers(page);
+	bh = head;
+
+	do {
+		if (!buffer_mapped(bh))
+			continue;
+		/*
+		 * If it's a fully non-blocking write attempt and we cannot
+		 * lock the buffer then redirty the page. Note that this can
+		 * potentially cause a busy-wait loop from pdflush and kswapd
+		 * activity, but those code paths have their own higher-level
+		 * throttling.
+		 */
+		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
+			lock_buffer(bh);
+		} else if (!trylock_buffer(bh)) {
+			redirty_page_for_writepage(wbc, page);
+			continue;
+		}
+		if (test_clear_buffer_dirty(bh)) {
+			mark_buffer_async_write(bh);
+		} else {
+			unlock_buffer(bh);
+		}
+	} while ((bh = bh->b_this_page) != head);
+
+	/*
+	 * The page and its buffers are protected by PageWriteback(), so we can
+	 * drop the bh refcounts early.
+	 */
+	BUG_ON(PageWriteback(page));
+	set_page_writeback(page);
+
+	do {
+		struct buffer_head *next = bh->b_this_page;
+		if (buffer_async_write(bh)) {
+			submit_bh(write_op, bh);
+			nr_underway++;
+		}
+		bh = next;
+	} while (bh != head);
+	unlock_page(page);
+
+	err = 0;
+	if (nr_underway == 0)
+		end_page_writeback(page);
+
+	return err;
 }
 
 static const struct address_space_operations aspace_aops = {