timekeeping: Rework frequency adjustments to work better w/ nohz
The existing timekeeping_adjust logic has always been complicated to understand. Further, since it was developed prior to NOHZ becoming common, it's not surprising that it performs poorly when NOHZ is enabled. Since Miroslav pointed out the problematic nature of the existing code in the NOHZ case, I've tried to refactor the code to perform better.

The problem with the previous approach was that it tried to adjust for the total cumulative error using a scaled dampening factor. This resulted in large errors being corrected slowly, while small errors were corrected quickly. With NOHZ the timekeeping code doesn't know how far out the next tick will be, so this results in bad over-correction of small errors and insufficient correction of large errors.

Inspired by Miroslav's patch, I've refactored the code to address the correction in two steps:

1) Check the future frequency error for the next tick, and if the frequency error is large, correct it so that it doesn't cause much accumulated error.

2) Then make a small single-unit adjustment to correct any cumulative error that has collected over time.

This method performs fairly well in the simulator Miroslav created.

Major credit to Miroslav for pointing out the issue, providing the original patch to resolve this, a simulator for testing, as well as helping debug and resolve issues in my implementation so that it performed closer to his original implementation.

Cc: Miroslav Lichvar <mlichvar@redhat.com>
Cc: Richard Cochran <richardcochran@gmail.com>
Cc: Prarit Bhargava <prarit@redhat.com>
Reported-by: Miroslav Lichvar <mlichvar@redhat.com>
Signed-off-by: John Stultz <john.stultz@linaro.org>
This commit is contained in:
parent  e2dff1ec0c
commit  dc491596f6
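Before the patch itself, here is a minimal, self-contained sketch of the two-step scheme the commit message describes. It is purely illustrative: struct sketch_tk, sketch_adjust() and the plain-integer units are invented for this example and are not kernel APIs; the real implementation in the diff below steers tk->tkr.mult in shifted clocksource units and scales interval and offset along with the multiplier.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for the timekeeper state; all names are made up. */
struct sketch_tk {
        int64_t mult;      /* clock multiplier being steered */
        int64_t ntp_error; /* accumulated error vs. the NTP-requested time */
        int     err_mult;  /* nonzero while a +1 cumulative nudge is applied */
};

static void sketch_adjust(struct sketch_tk *tk, int64_t tick_error,
                          int64_t interval)
{
        /*
         * Step 1: look at the frequency error expected on the *next* tick.
         * Small positive errors are tolerated; anything larger (or any
         * negative error) is corrected now, with the correction scaled up
         * until the remaining per-tick error fits within one interval.
         */
        if (tick_error < 0 || tick_error > interval) {
                int64_t err = llabs(tick_error);
                int shift = 0;

                while (err > interval) {
                        err >>= 1;
                        shift++;
                }
                tk->mult += (tick_error < 0) ? -(1LL << shift) : (1LL << shift);
        }

        /*
         * Step 2: apply (or withdraw) a single-unit adjustment to bleed off
         * whatever cumulative error has built up over time.
         */
        if (!tk->err_mult && tk->ntp_error > 0) {
                tk->err_mult = 1;
                tk->mult += 1;
        } else if (tk->err_mult && tk->ntp_error <= 0) {
                tk->err_mult = 0;
                tk->mult -= 1;
        }
}

int main(void)
{
        struct sketch_tk tk = { .mult = 1000000, .ntp_error = 5, .err_mult = 0 };

        /* Pretend the next tick would run three intervals fast. */
        sketch_adjust(&tk, 3000, 1000);
        printf("mult is now %lld\n", (long long)tk.mult);
        return 0;
}

Because step 1 is computed from the next tick's expected error rather than from the accumulated total, the size of the correction no longer depends on how long NOHZ lets the CPU sleep between ticks; step 2 then trims whatever residual error remains, one unit at a time.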
@@ -92,6 +92,7 @@ struct timekeeper {
 	u32		raw_interval;
 	s64		ntp_error;
 	u32		ntp_error_shift;
+	u32		ntp_err_mult;
 };
 
 #ifdef CONFIG_GENERIC_TIME_VSYSCALL
@@ -178,6 +178,7 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
 	 * to counteract clock drifting.
 	 */
 	tk->tkr.mult = clock->mult;
+	tk->ntp_err_mult = 0;
 }
 
 /* Timekeeper helper functions. */
@@ -1257,125 +1258,34 @@ static int __init timekeeping_init_ops(void)
 	register_syscore_ops(&timekeeping_syscore_ops);
 	return 0;
 }
 
 device_initcall(timekeeping_init_ops);
 
 /*
- * If the error is already larger, we look ahead even further
- * to compensate for late or lost adjustments.
+ * Apply a multiplier adjustment to the timekeeper
  */
-static __always_inline int timekeeping_bigadjust(struct timekeeper *tk,
-						 s64 error, s64 *interval,
-						 s64 *offset)
+static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
+							 s64 offset,
+							 bool negative,
+							 int adj_scale)
 {
-	s64 tick_error, i;
-	u32 look_ahead, adj;
-	s32 error2, mult;
+	s64 interval = tk->cycle_interval;
+	s32 mult_adj = 1;
 
-	/*
-	 * Use the current error value to determine how much to look ahead.
-	 * The larger the error the slower we adjust for it to avoid problems
-	 * with losing too many ticks, otherwise we would overadjust and
-	 * produce an even larger error.  The smaller the adjustment the
-	 * faster we try to adjust for it, as lost ticks can do less harm
-	 * here.  This is tuned so that an error of about 1 msec is adjusted
-	 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
-	 */
-	error2 = tk->ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ);
-	error2 = abs(error2);
-	for (look_ahead = 0; error2 > 0; look_ahead++)
-		error2 >>= 2;
-
-	/*
-	 * Now calculate the error in (1 << look_ahead) ticks, but first
-	 * remove the single look ahead already included in the error.
-	 */
-	tick_error = ntp_tick_length() >> (tk->ntp_error_shift + 1);
-	tick_error -= tk->xtime_interval >> 1;
-	error = ((error - tick_error) >> look_ahead) + tick_error;
-
-	/* Finally calculate the adjustment shift value. */
-	i = *interval;
-	mult = 1;
-	if (error < 0) {
-		error = -error;
-		*interval = -*interval;
-		*offset = -*offset;
-		mult = -1;
+	if (negative) {
+		mult_adj = -mult_adj;
+		interval = -interval;
+		offset = -offset;
 	}
-	for (adj = 0; error > i; adj++)
-		error >>= 1;
+	mult_adj <<= adj_scale;
+	interval <<= adj_scale;
+	offset <<= adj_scale;
 
-	*interval <<= adj;
-	*offset <<= adj;
-	return mult << adj;
-}
-
-/*
- * Adjust the multiplier to reduce the error value,
- * this is optimized for the most common adjustments of -1,0,1,
- * for other values we can do a bit more work.
- */
-static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
-{
-	s64 error, interval = tk->cycle_interval;
-	int adj;
-
-	/*
-	 * The point of this is to check if the error is greater than half
-	 * an interval.
-	 *
-	 * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
-	 *
-	 * Note we subtract one in the shift, so that error is really error*2.
-	 * This "saves" dividing(shifting) interval twice, but keeps the
-	 * (error > interval) comparison as still measuring if error is
-	 * larger than half an interval.
-	 *
-	 * Note: It does not "save" on aggravation when reading the code.
-	 */
-	error = tk->ntp_error >> (tk->ntp_error_shift - 1);
-	if (error > interval) {
-		/*
-		 * We now divide error by 4(via shift), which checks if
-		 * the error is greater than twice the interval.
-		 * If it is greater, we need a bigadjust, if its smaller,
-		 * we can adjust by 1.
-		 */
-		error >>= 2;
-		if (likely(error <= interval))
-			adj = 1;
-		else
-			adj = timekeeping_bigadjust(tk, error, &interval, &offset);
-	} else {
-		if (error < -interval) {
-			/* See comment above, this is just switched for the negative */
-			error >>= 2;
-			if (likely(error >= -interval)) {
-				adj = -1;
-				interval = -interval;
-				offset = -offset;
-			} else {
-				adj = timekeeping_bigadjust(tk, error, &interval, &offset);
-			}
-		} else {
-			goto out_adjust;
-		}
-	}
-
-	if (unlikely(tk->tkr.clock->maxadj &&
-		(tk->tkr.mult + adj > tk->tkr.clock->mult + tk->tkr.clock->maxadj))) {
-		printk_deferred_once(KERN_WARNING
-			"Adjusting %s more than 11%% (%ld vs %ld)\n",
-			tk->tkr.clock->name, (long)tk->tkr.mult + adj,
-			(long)tk->tkr.clock->mult + tk->tkr.clock->maxadj);
-	}
 	/*
 	 * So the following can be confusing.
 	 *
-	 * To keep things simple, lets assume adj == 1 for now.
+	 * To keep things simple, lets assume mult_adj == 1 for now.
 	 *
-	 * When adj != 1, remember that the interval and offset values
+	 * When mult_adj != 1, remember that the interval and offset values
 	 * have been appropriately scaled so the math is the same.
 	 *
 	 * The basic idea here is that we're increasing the multiplier
@@ -1419,12 +1329,76 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
 	 *
 	 * XXX - TODO: Doc ntp_error calculation.
 	 */
-	tk->tkr.mult += adj;
+	tk->tkr.mult += mult_adj;
 	tk->xtime_interval += interval;
 	tk->tkr.xtime_nsec -= offset;
 	tk->ntp_error -= (interval - offset) << tk->ntp_error_shift;
+}
+
+/*
+ * Calculate the multiplier adjustment needed to match the frequency
+ * specified by NTP
+ */
+static __always_inline void timekeeping_freqadjust(struct timekeeper *tk,
+						   s64 offset)
+{
+	s64 interval = tk->cycle_interval;
+	s64 xinterval = tk->xtime_interval;
+	s64 tick_error;
+	bool negative;
+	u32 adj;
+
+	/* Remove any current error adj from freq calculation */
+	if (tk->ntp_err_mult)
+		xinterval -= tk->cycle_interval;
+
+	/* Calculate current error per tick */
+	tick_error = ntp_tick_length() >> tk->ntp_error_shift;
+	tick_error -= (xinterval + tk->xtime_remainder);
+
+	/* Don't worry about correcting it if its small */
+	if (likely((tick_error >= 0) && (tick_error <= interval)))
+		return;
+
+	/* preserve the direction of correction */
+	negative = (tick_error < 0);
+
+	/* Sort out the magnitude of the correction */
+	tick_error = abs(tick_error);
+	for (adj = 0; tick_error > interval; adj++)
+		tick_error >>= 1;
+
+	/* scale the corrections */
+	timekeeping_apply_adjustment(tk, offset, negative, adj);
+}
+
+/*
+ * Adjust the timekeeper's multiplier to the correct frequency
+ * and also to reduce the accumulated error value.
+ */
+static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
+{
+	/* Correct for the current frequency error */
+	timekeeping_freqadjust(tk, offset);
+
+	/* Next make a small adjustment to fix any cumulative error */
+	if (!tk->ntp_err_mult && (tk->ntp_error > 0)) {
+		tk->ntp_err_mult = 1;
+		timekeeping_apply_adjustment(tk, offset, 0, 0);
+	} else if (tk->ntp_err_mult && (tk->ntp_error <= 0)) {
+		/* Undo any existing error adjustment */
+		timekeeping_apply_adjustment(tk, offset, 1, 0);
+		tk->ntp_err_mult = 0;
+	}
+
+	if (unlikely(tk->tkr.clock->maxadj &&
+		(tk->tkr.mult > tk->tkr.clock->mult + tk->tkr.clock->maxadj))) {
+		printk_once(KERN_WARNING
+			"Adjusting %s more than 11%% (%ld vs %ld)\n",
+			tk->tkr.clock->name, (long)tk->tkr.mult,
+			(long)tk->tkr.clock->mult + tk->tkr.clock->maxadj);
+	}
 
-out_adjust:
 	/*
 	 * It may be possible that when we entered this function, xtime_nsec
 	 * was very small.  Further, if we're slightly speeding the clocksource
@@ -1444,7 +1418,6 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
 		tk->tkr.xtime_nsec = 0;
 		tk->ntp_error += neg << tk->ntp_error_shift;
 	}
-
 }
 
 /**