f2fs: add garbage collection functions

This adds on-demand and background cleaning functions.

- The basic background cleaning policy is to do cleaning jobs as much as
  possible whenever the system is idle. Once a background cleaning pass is
  done, the cleaner sleeps for a while so as not to interfere with VFS calls.
  The sleep time is adjusted dynamically according to the status of the whole
  set of segments: it is decreased when all of the following conditions hold,
  and otherwise increased incrementally up to the maximum time.
  . no GC is being conducted at the moment, and
  . the IO subsystem is idle, judged by the number of requests in the bdev's
    request list, and
  . there are enough dirty segments.
  The min and max sleep times are 10 secs and 30 secs by default.
- F2FS adopts a default victim selection policy in which background cleaning
  uses a cost-benefit algorithm, while on-demand cleaning uses a greedy
  algorithm (a minimal sketch of the two scoring functions follows this
  message).
- The method of moving data during cleaning differs slightly between the two
  schemes. For background cleaning, F2FS loads the data and marks the pages
  dirty, expecting them to be written out later by the flusher or the VM. For
  on-demand cleaning, F2FS must move the data right away.
- To identify the valid blocks in a victim segment, F2FS scans the validity
  bitmap of the segment, which is managed as an SIT entry.

Signed-off-by: Jaegeuk Kim <jaegeuk.kim@samsung.com>
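
The two victim selection policies live in gc.c rather than in the header
below. As a rough sketch of the difference, assuming hypothetical helpers
and parameters (greedy_score(), cb_score(), valid_blocks, blocks_per_seg,
and a segment age derived from SIT mtime tracking; none of these are the
actual f2fs API), the scores could look like this:

/* greedy (on-demand cleaning): lower score wins, i.e. fewest blocks to copy */
static unsigned int greedy_score(unsigned int valid_blocks)
{
        return valid_blocks;
}

/* cost-benefit (background cleaning): higher score wins */
static unsigned long long cb_score(unsigned int valid_blocks,
                                   unsigned int blocks_per_seg,
                                   unsigned long long age)
{
        unsigned int u = valid_blocks * 100 / blocks_per_seg; /* utilization, % */

        /* classic LFS form: benefit / cost = age * (100 - u) / (100 + u) */
        return age * (100 - u) / (100 + u);
}

The greedy score minimizes copying cost right now, which suits on-demand
cleaning; the cost-benefit score additionally favors old, mostly-invalid
segments, which suits background cleaning.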

/*
 * fs/f2fs/gc.h
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define GC_THREAD_MIN_WB_PAGES          1       /* threshold to determine
                                                 * whether the IO subsystem
                                                 * is idle or not
                                                 */
#define GC_THREAD_MIN_SLEEP_TIME        10000   /* milliseconds */
#define GC_THREAD_MAX_SLEEP_TIME        30000   /* milliseconds */
#define GC_THREAD_NOGC_SLEEP_TIME       10000   /* milliseconds, used when no GC is conducted */
#define LIMIT_INVALID_BLOCK             40      /* percentage over total user space */
#define LIMIT_FREE_BLOCK                40      /* percentage over invalid + free space */

/* maximum number of dirty segments to search when selecting a victim segment */
#define MAX_VICTIM_SEARCH       20

struct f2fs_gc_kthread {
        struct task_struct *f2fs_gc_task;       /* background GC thread */
        wait_queue_head_t gc_wait_queue_head;   /* wait queue for the thread's timed sleep */
};

/* an inode whose data pages are being moved by GC */
struct inode_entry {
        struct list_head list;
        struct inode *inode;
};
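
/*
 * Illustrative sketch only (the real list handling lives in gc.c): a
 * cleaning pass can pin each victim inode on a local list so the pages
 * it dirtied can be found again later.
 *
 *      struct list_head ilist;
 *      struct inode_entry *entry;
 *
 *      INIT_LIST_HEAD(&ilist);
 *      entry = kmalloc(sizeof(*entry), GFP_NOFS);
 *      if (entry) {
 *              entry->inode = inode;
 *              list_add_tail(&entry->list, &ilist);
 *      }
 */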

/*
 * inline functions
 */
/* free blocks available to the user, excluding overprovisioned segments */
static inline block_t free_user_blocks(struct f2fs_sb_info *sbi)
{
        if (free_segments(sbi) < overprovision_segments(sbi))
                return 0;
        else
                return (free_segments(sbi) - overprovision_segments(sbi))
                                                << sbi->log_blocks_per_seg;
}

/* invalid-block threshold: LIMIT_INVALID_BLOCK% of the total user space */
static inline block_t limit_invalid_user_blocks(struct f2fs_sb_info *sbi)
{
        return (long)(sbi->user_block_count * LIMIT_INVALID_BLOCK) / 100;
}

/* free-block threshold: LIMIT_FREE_BLOCK% of the reclaimable (invalid + free) space */
static inline block_t limit_free_user_blocks(struct f2fs_sb_info *sbi)
{
        block_t reclaimable_user_blocks = sbi->user_block_count -
                                                written_block_count(sbi);
        return (long)(reclaimable_user_blocks * LIMIT_FREE_BLOCK) / 100;
}

/* back off: lengthen the GC thread's sleep, capped at the maximum */
static inline long increase_sleep_time(long wait)
{
        wait += GC_THREAD_MIN_SLEEP_TIME;
        if (wait > GC_THREAD_MAX_SLEEP_TIME)
                wait = GC_THREAD_MAX_SLEEP_TIME;
        return wait;
}

/* speed up: shorten the GC thread's sleep, floored at the minimum */
static inline long decrease_sleep_time(long wait)
{
        wait -= GC_THREAD_MIN_SLEEP_TIME;
        if (wait <= GC_THREAD_MIN_SLEEP_TIME)
                wait = GC_THREAD_MIN_SLEEP_TIME;
        return wait;
}

static inline bool has_enough_invalid_blocks(struct f2fs_sb_info *sbi)
{
        block_t invalid_user_blocks = sbi->user_block_count -
                                        written_block_count(sbi);
        /*
         * Background GC is triggered when both of the following
         * conditions hold:
         * 1. there are enough invalid blocks, and
         * 2. there is not enough free space.
         */
        if (invalid_user_blocks > limit_invalid_user_blocks(sbi) &&
                        free_user_blocks(sbi) < limit_free_user_blocks(sbi))
                return true;
        return false;
}
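
/*
 * Worked example with illustrative numbers: with user_block_count = 1000
 * and written_block_count = 500, invalid_user_blocks = 500, which exceeds
 * limit_invalid_user_blocks = 400 (40% of 1000). Background GC then fires
 * only if free_user_blocks < limit_free_user_blocks = 200 (40% of the 500
 * unwritten blocks), i.e. if most of the reclaimable space is trapped in
 * dirty segments rather than sitting in free ones.
 */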

/* consider the IO subsystem idle when the bdev's request list is empty */
static inline int is_idle(struct f2fs_sb_info *sbi)
{
        struct block_device *bdev = sbi->sb->s_bdev;
        struct request_queue *q = bdev_get_queue(bdev);
        struct request_list *rl = &q->root_rl;

        return !(rl->count[BLK_RW_SYNC]) && !(rl->count[BLK_RW_ASYNC]);
}
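
Taken together, these helpers pace the background cleaner. Below is a
minimal sketch of the kthread loop they are meant to drive; the real loop
lives in gc.c, the f2fs_gc() call and thread setup are simplified
assumptions here, and <linux/kthread.h> plus <linux/delay.h> are assumed
to be included:

static int gc_thread_sketch(void *data)
{
        struct f2fs_sb_info *sbi = data;
        long wait = GC_THREAD_MIN_SLEEP_TIME;

        while (!kthread_should_stop()) {
                msleep_interruptible(wait);

                /* stay out of the way while the IO subsystem is busy */
                if (!is_idle(sbi)) {
                        wait = increase_sleep_time(wait);
                        continue;
                }

                /* sleep less once enough dirty segments have piled up */
                if (has_enough_invalid_blocks(sbi))
                        wait = decrease_sleep_time(wait);
                else
                        wait = increase_sleep_time(wait);

                f2fs_gc(sbi);   /* one background cleaning pass */
        }
        return 0;
}

This matches the policy in the commit message: the sleep time shrinks only
when GC can run, the device is idle, and there is enough dirty space to
make cleaning worthwhile; otherwise it grows toward the 30-second cap.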