tick: Mark tick related hrtimers to expiry in hard interrupt context
The tick related hrtimers, which drive the scheduler tick and hrtimer based broadcasting, are required to expire in hard interrupt context for obvious reasons.

Mark them so PREEMPT_RT kernels won't move them to soft interrupt expiry.

Make the horribly formatted RCU_NONIDLE() bracket maze readable while at it.

No functional change.

[ tglx: Split out from larger combo patch. Add changelog ]

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20190726185753.459144407@linutronix.de
This commit is contained in:
parent 2c0d278f32
commit 902a9f9c50
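For readers unfamiliar with the _HARD hrtimer modes, the sketch below illustrates the pattern the patch applies. It is not part of the patch; the timer, handler and timeout names are made up for illustration. Initializing and arming an hrtimer with a HRTIMER_MODE_*_HARD mode keeps its expiry in hard interrupt context, so PREEMPT_RT does not defer it to soft interrupt (timer softirq) expiry.

#include <linux/hrtimer.h>
#include <linux/ktime.h>

/* Hypothetical example timer, not part of the patch. */
static struct hrtimer example_timer;

static enum hrtimer_restart example_handler(struct hrtimer *t)
{
	/* Runs in hard interrupt context because of the _HARD mode below. */
	return HRTIMER_NORESTART;
}

static void example_setup(void)
{
	/* _HARD: expire in hard interrupt context, even on PREEMPT_RT. */
	hrtimer_init(&example_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_HARD);
	example_timer.function = example_handler;
	hrtimer_start(&example_timer, ms_to_ktime(10), HRTIMER_MODE_REL_HARD);
}

The patch below does exactly this for the tick broadcast hrtimer and the per-CPU scheduler tick hrtimer, using the absolute (and pinned) variants HRTIMER_MODE_ABS_HARD and HRTIMER_MODE_ABS_PINNED_HARD.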
--- a/kernel/time/tick-broadcast-hrtimer.c
+++ b/kernel/time/tick-broadcast-hrtimer.c
@@ -59,11 +59,16 @@ static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
 	 * hrtimer_{start/cancel} functions call into tracing,
 	 * calls to these functions must be bound within RCU_NONIDLE.
 	 */
-	RCU_NONIDLE({
+	RCU_NONIDLE(
+		{
 			bc_moved = hrtimer_try_to_cancel(&bctimer) >= 0;
-			if (bc_moved)
+			if (bc_moved) {
 				hrtimer_start(&bctimer, expires,
-					      HRTIMER_MODE_ABS_PINNED);});
+					      HRTIMER_MODE_ABS_PINNED_HARD);
+			}
+		}
+	);
+
 	if (bc_moved) {
 		/* Bind the "device" to the cpu */
 		bc->bound_on = smp_processor_id();
@@ -104,7 +109,7 @@ static enum hrtimer_restart bc_handler(struct hrtimer *t)
 
 void tick_setup_hrtimer_broadcast(void)
 {
-	hrtimer_init(&bctimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	hrtimer_init(&bctimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
 	bctimer.function = bc_handler;
 	clockevents_register_device(&ce_broadcast_hrtimer);
 }
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -634,10 +634,12 @@ static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
 	/* Forward the time to expire in the future */
 	hrtimer_forward(&ts->sched_timer, now, tick_period);
 
-	if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
-		hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED);
-	else
+	if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
+		hrtimer_start_expires(&ts->sched_timer,
+				      HRTIMER_MODE_ABS_PINNED_HARD);
+	} else {
 		tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
+	}
 
 	/*
 	 * Reset to make sure next tick stop doesn't get fooled by past
@@ -802,7 +804,8 @@ static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu)
 	}
 
 	if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
-		hrtimer_start(&ts->sched_timer, tick, HRTIMER_MODE_ABS_PINNED);
+		hrtimer_start(&ts->sched_timer, tick,
+			      HRTIMER_MODE_ABS_PINNED_HARD);
 	} else {
 		hrtimer_set_expires(&ts->sched_timer, tick);
 		tick_program_event(tick, 1);
@@ -1327,7 +1330,7 @@ void tick_setup_sched_timer(void)
 	/*
 	 * Emulate tick processing via per-CPU hrtimers:
 	 */
-	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+	hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD);
 	ts->sched_timer.function = tick_sched_timer;
 
 	/* Get the next period (per-CPU) */
@@ -1342,7 +1345,7 @@ void tick_setup_sched_timer(void)
 	}
 
 	hrtimer_forward(&ts->sched_timer, now, tick_period);
-	hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED);
+	hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED_HARD);
 	tick_nohz_activate(ts, NOHZ_MODE_HIGHRES);
 }
 #endif /* HIGH_RES_TIMERS */