iocost: improve nr_lagging handling
Some IOs may span multiple periods. As latencies are collected on
completion, the in-between periods won't register them and may
incorrectly decide to increase vrate. nr_lagging tracks these IOs to
avoid those situations. Currently, whenever there are IOs spanning from
the previous period, busy_level is reset to 0 if negative, thus
suppressing vrate increase.

This has the following two problems.

* When latency target percentiles aren't set, vrate adjustment should
  only be governed by queue depth depletion; however, the current code
  keeps nr_lagging active, which pulls in latency results and can keep
  vrate down unexpectedly.

* When the lagging condition is detected, it resets the entire negative
  busy_level. This turned out to be way too aggressive on some devices
  which sometimes experience extended latencies on a small subset of
  commands. In addition, a lagging IO will be accounted as a latency
  target miss on completion anyway, and resetting busy_level amplifies
  its impact unnecessarily.

This patch fixes the above two problems by disabling nr_lagging
counting when latency target percentiles aren't set, and by blocking
vrate increases when there are lagging IOs while leaving busy_level
as-is.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
commit 7cd806a9a9
parent 25d41e4aad
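As an aside before the diff, here is a minimal standalone C sketch of
the two fixes. It is an illustrative model only: struct toy_ioc,
count_lagging(), and may_adjust_vrate() are invented for this sketch
and are not kernel API; only MILLION (a ppm threshold of MILLION means
"no latency target configured") and the shapes of the two predicates
come from the patch. The real logic lives inside ioc_timer_fn().

#include <stdbool.h>
#include <stdio.h>

#define MILLION 1000000	/* as in blk-iocost.c: MILLION ppm == no target */

/* toy model only -- not the kernel's struct ioc */
struct toy_ioc {
	int busy_level;	/* >0: device too busy; <0: underutilized */
};

/* Fix 1: lagging IOs are only worth counting when at least one
 * latency target percentile is configured (threshold below MILLION). */
static bool count_lagging(unsigned int ppm_rthr, unsigned int ppm_wthr)
{
	return ppm_rthr != MILLION || ppm_wthr != MILLION;
}

/* Fix 2: leave a negative busy_level as-is, but block the resulting
 * vrate increase while IOs are still lagging from earlier periods;
 * vrate decreases (busy_level > 0) stay allowed. */
static bool may_adjust_vrate(const struct toy_ioc *ioc, int nr_lagging)
{
	return ioc->busy_level > 0 || (ioc->busy_level < 0 && !nr_lagging);
}

int main(void)
{
	struct toy_ioc ioc = { .busy_level = -3 };

	printf("count lagging w/o targets: %d\n",
	       count_lagging(MILLION, MILLION));	/* 0 */
	printf("raise vrate while lagging: %d\n",
	       may_adjust_vrate(&ioc, 2));		/* 0: blocked */
	printf("raise vrate, none lagging: %d\n",
	       may_adjust_vrate(&ioc, 0));		/* 1 */
	return 0;
}

Compiled and run, this prints 0/0/1: without latency targets, lagging
IOs are ignored entirely, and a negative busy_level is no longer reset
but simply cannot raise vrate until the lagging clears.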
@@ -1407,7 +1407,8 @@ static void ioc_timer_fn(struct timer_list *timer)
 		 * comparing vdone against period start. If lagging behind
 		 * IOs from past periods, don't increase vrate.
 		 */
-		if (!atomic_read(&iocg_to_blkg(iocg)->use_delay) &&
+		if ((ppm_rthr != MILLION || ppm_wthr != MILLION) &&
+		    !atomic_read(&iocg_to_blkg(iocg)->use_delay) &&
 		    time_after64(vtime, vdone) &&
 		    time_after64(vtime, now.vnow -
 				 MAX_LAGGING_PERIODS * period_vtime) &&
@@ -1537,21 +1538,23 @@ static void ioc_timer_fn(struct timer_list *timer)
 	    missed_ppm[WRITE] > ppm_wthr) {
 		ioc->busy_level = max(ioc->busy_level, 0);
 		ioc->busy_level++;
-	} else if (nr_lagging) {
-		ioc->busy_level = max(ioc->busy_level, 0);
-	} else if (nr_shortages && !nr_surpluses &&
-		   rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 &&
+	} else if (rq_wait_pct <= RQ_WAIT_BUSY_PCT * UNBUSY_THR_PCT / 100 &&
 		   missed_ppm[READ] <= ppm_rthr * UNBUSY_THR_PCT / 100 &&
 		   missed_ppm[WRITE] <= ppm_wthr * UNBUSY_THR_PCT / 100) {
-		ioc->busy_level = min(ioc->busy_level, 0);
-		ioc->busy_level--;
+		/* take action iff there is contention */
+		if (nr_shortages && !nr_lagging) {
+			ioc->busy_level = min(ioc->busy_level, 0);
+			/* redistribute surpluses first */
+			if (!nr_surpluses)
+				ioc->busy_level--;
+		}
 	} else {
 		ioc->busy_level = 0;
 	}
 
 	ioc->busy_level = clamp(ioc->busy_level, -1000, 1000);
 
-	if (ioc->busy_level) {
+	if (ioc->busy_level > 0 || (ioc->busy_level < 0 && !nr_lagging)) {
 		u64 vrate = atomic64_read(&ioc->vtime_rate);
 		u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max;
 