forked from luck/tmp_suning_uos_patched
readahead: remove several readahead macros
Remove VM_MAX_CACHE_HIT, MAX_RA_PAGES and MIN_RA_PAGES.

Signed-off-by: Fengguang Wu <wfg@mail.ustc.edu.cn>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
7ff81078d8
commit
535443f515
|
@ -1106,8 +1106,6 @@ int write_one_page(struct page *page, int wait);
|
|||
/* readahead.c */
|
||||
#define VM_MAX_READAHEAD 128 /* kbytes */
|
||||
#define VM_MIN_READAHEAD 16 /* kbytes (includes current page) */
|
||||
#define VM_MAX_CACHE_HIT 256 /* max pages in a row in cache before
|
||||
* turning readahead off */
|
||||
|
||||
int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
|
||||
pgoff_t offset, unsigned long nr_to_read);
|
||||
|
|
|
@ -22,16 +22,8 @@ void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
|
|||
}
|
||||
EXPORT_SYMBOL(default_unplug_io_fn);
|
||||
|
||||
/*
|
||||
* Convienent macros for min/max read-ahead pages.
|
||||
* Note that MAX_RA_PAGES is rounded down, while MIN_RA_PAGES is rounded up.
|
||||
* The latter is necessary for systems with large page size(i.e. 64k).
|
||||
*/
|
||||
#define MAX_RA_PAGES (VM_MAX_READAHEAD*1024 / PAGE_CACHE_SIZE)
|
||||
#define MIN_RA_PAGES DIV_ROUND_UP(VM_MIN_READAHEAD*1024, PAGE_CACHE_SIZE)
|
||||
|
||||
struct backing_dev_info default_backing_dev_info = {
|
||||
.ra_pages = MAX_RA_PAGES,
|
||||
.ra_pages = VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
|
||||
.state = 0,
|
||||
.capabilities = BDI_CAP_MAP_COPY,
|
||||
.unplug_io_fn = default_unplug_io_fn,
|
||||
|
|
Loading…
Reference in New Issue
Block a user