sched: round a bit better
round a tiny bit better in high-frequency rescheduling scenarios, by
rounding around zero instead of rounding down.

(this is pretty theoretical though)

Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 254753dc32
commit 194081ebfa
@@ -638,6 +638,11 @@ static u64 div64_likely32(u64 divident, unsigned long divisor)
 
 #define WMULT_SHIFT	32
 
+/*
+ * Shift right and round:
+ */
+#define RSR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
+
 static unsigned long
 calc_delta_mine(unsigned long delta_exec, unsigned long weight,
 		struct load_weight *lw)
@@ -645,18 +650,17 @@ calc_delta_mine(unsigned long delta_exec, unsigned long weight,
 	u64 tmp;
 
 	if (unlikely(!lw->inv_weight))
-		lw->inv_weight = WMULT_CONST / lw->weight;
+		lw->inv_weight = (WMULT_CONST - lw->weight/2) / lw->weight + 1;
 
 	tmp = (u64)delta_exec * weight;
 	/*
 	 * Check whether we'd overflow the 64-bit multiplication:
 	 */
-	if (unlikely(tmp > WMULT_CONST)) {
-		tmp = ((tmp >> WMULT_SHIFT/2) * lw->inv_weight)
-				>> (WMULT_SHIFT/2);
-	} else {
-		tmp = (tmp * lw->inv_weight) >> WMULT_SHIFT;
-	}
+	if (unlikely(tmp > WMULT_CONST))
+		tmp = RSR(RSR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
+			WMULT_SHIFT/2);
+	else
+		tmp = RSR(tmp * lw->inv_weight, WMULT_SHIFT);
 
 	return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
 }
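For readers tracing the arithmetic: a plain `>>` truncates, so before this patch every step of the fixed-point divide rounded down and the error was biased in one direction. RSR() adds half of the shifted-out range before shifting, which rounds to nearest, and the new inv_weight formula biases the reciprocal up by a matching half-step. The sketch below is a minimal stand-alone demonstration of that effect, not kernel code: it shrinks the 32-bit WMULT scale to 10 bits and uses made-up weight/delta values (weight = 3, delta = 500) so the numbers stay readable, and it uses 1ULL in the macro where the kernel uses 1UL.

	#include <stdio.h>
	#include <stdint.h>

	/* Same shape as the patch's RSR(): shift right by y, rounding to
	 * nearest by first adding half of the range that gets shifted out. */
	#define RSR(x, y) (((x) + (1ULL << ((y) - 1))) >> (y))

	int main(void)
	{
		/* Toy version of calc_delta_mine()'s delta * inv_weight >> shift,
		 * with a 10-bit scale instead of 32 bits.  Values illustrative only. */
		const uint64_t SHIFT = 10;
		const uint64_t SCALE = 1ULL << 10;	/* stands in for WMULT_CONST */
		uint64_t weight = 3, delta = 500;

		printf("true:  %.2f\n", (double)delta / weight);	/* 166.67 */

		/* old path: reciprocal and final shift both round down */
		uint64_t inv_old = SCALE / weight;
		printf("old:   %llu\n",
		       (unsigned long long)((delta * inv_old) >> SHIFT));	/* 166 */

		/* new path: reciprocal biased up, product rounded to nearest */
		uint64_t inv_new = (SCALE - weight / 2) / weight + 1;
		printf("new:   %llu\n",
		       (unsigned long long)RSR(delta * inv_new, SHIFT));	/* 167 */
		return 0;
	}

With these sample inputs the old path prints 166 against a true value of 166.67, while the new path prints 167, the nearest integer. Averaged over many calls the error now straddles zero instead of always dropping the fraction, which is the "rounding around zero" the changelog describes.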