rcu: Mark task as .need_qs less aggressively
If any scheduling-clock interrupt interrupts an RCU-preempt read-side critical section, the interrupted task's ->rcu_read_unlock_special.b.need_qs field is set. This causes the outermost rcu_read_unlock() to incur the extra overhead of calling into rcu_read_unlock_special(). This commit reduces that overhead by setting ->rcu_read_unlock_special.b.need_qs only if the grace period has been in effect for more than one second. Why one second? Because this is comfortably smaller than the minimum RCU CPU stall-warning timeout of three seconds, but long enough that the .need_qs marking should happen quite rarely. And if your RCU read-side critical section has run on-CPU for a full second, it is not unreasonable to invest some CPU time in ending the grace period quickly.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent 6f56f714db
commit 15651201fa
@@ -730,6 +730,7 @@ rcu_preempt_check_blocked_tasks(struct rcu_state *rsp, struct rcu_node *rnp)
  */
 static void rcu_preempt_check_callbacks(void)
 {
+	struct rcu_state *rsp = &rcu_preempt_state;
 	struct task_struct *t = current;
 
 	if (t->rcu_read_lock_nesting == 0) {
@@ -738,7 +739,9 @@ static void rcu_preempt_check_callbacks(void)
 	}
 	if (t->rcu_read_lock_nesting > 0 &&
 	    __this_cpu_read(rcu_data_p->core_needs_qs) &&
-	    __this_cpu_read(rcu_data_p->cpu_no_qs.b.norm))
+	    __this_cpu_read(rcu_data_p->cpu_no_qs.b.norm) &&
+	    !t->rcu_read_unlock_special.b.need_qs &&
+	    time_after(jiffies, rsp->gp_start + HZ))
 		t->rcu_read_unlock_special.b.need_qs = true;
 }
 
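For reference, the heart of the change is the new time_after() test against rsp->gp_start plus HZ. The sketch below is not part of the patch: it restates that one-second threshold as a standalone helper, with gp_start_jiffies as a hypothetical stand-in for rsp->gp_start, and with the per-CPU core_needs_qs and cpu_no_qs.b.norm checks from the real code omitted for brevity.

#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/types.h>

/*
 * Sketch only (not from the patch): decide whether to mark the current
 * task as needing a quiescent state.  The task is marked only when it
 * is inside an RCU-preempt read-side critical section, is not already
 * marked, and the grace period has been running for more than one
 * second (HZ jiffies).  gp_start_jiffies is a hypothetical stand-in
 * for rsp->gp_start.
 */
static bool should_mark_need_qs(struct task_struct *t,
				unsigned long gp_start_jiffies)
{
	return t->rcu_read_lock_nesting > 0 &&
	       !t->rcu_read_unlock_special.b.need_qs &&
	       time_after(jiffies, gp_start_jiffies + HZ);
}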