rcutorture: Use atomic increment for n_rcu_torture_timers
Currently, rcu_torture_timer() relies on a lock to guard updates to
n_rcu_torture_timers. Unfortunately, consolidating code with
rcu_torture_reader() will dispense with this lock. This commit therefore
makes n_rcu_torture_timers be an atomic_long_t and uses atomic_long_inc()
to carry out the update.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent 6b06aa723e
commit 8da9a59523
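The change itself is mechanical: a plain long counter that was bumped under rand_lock becomes an atomic_long_t bumped with atomic_long_inc(), so the count stays correct once the upcoming reader/timer consolidation removes that lock. As a rough illustration only, here is a minimal userspace C11 sketch of the same pattern; the names (n_timer_invocations, timer_body) and the use of <stdatomic.h> plus pthreads are stand-ins for the kernel's atomic_long_t API and are not part of this patch.

/*
 * Illustrative userspace analogue (not kernel code): a per-invocation
 * statistics counter is switched from a lock-guarded long to an atomic
 * long so it can be updated safely without any lock.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NTHREADS 4
#define NUPDATES 100000

/* Stands in for the kernel's atomic_long_t n_rcu_torture_timers. */
static atomic_long n_timer_invocations;

/* Each thread plays the role of rcu_torture_timer() firing repeatedly. */
static void *timer_body(void *arg)
{
	(void)arg;
	for (int i = 0; i < NUPDATES; i++)
		atomic_fetch_add_explicit(&n_timer_invocations, 1,
					  memory_order_relaxed);
	return NULL;
}

int main(void)
{
	pthread_t tid[NTHREADS];

	for (int i = 0; i < NTHREADS; i++)
		pthread_create(&tid[i], NULL, timer_body, NULL);
	for (int i = 0; i < NTHREADS; i++)
		pthread_join(tid[i], NULL);

	/* Stands in for atomic_long_read() in rcu_torture_stats_print(). */
	printf("nt: %ld\n", atomic_load(&n_timer_invocations));
	return 0;
}

Build with something like cc -std=c11 -pthread. A relaxed increment suffices here because the counter is purely a statistic: nothing orders against it, and only the final total is ever printed.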
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -151,7 +151,7 @@ static long n_rcu_torture_boost_ktrerror;
 static long n_rcu_torture_boost_rterror;
 static long n_rcu_torture_boost_failure;
 static long n_rcu_torture_boosts;
-static long n_rcu_torture_timers;
+static atomic_long_t n_rcu_torture_timers;
 static long n_barrier_attempts;
 static long n_barrier_successes;
 static atomic_long_t n_cbfloods;
@@ -1160,6 +1160,7 @@ static void rcu_torture_timer(struct timer_list *unused)
 	int pipe_count;
 	unsigned long long ts;
 
+	atomic_long_inc(&n_rcu_torture_timers);
 	idx = cur_ops->readlock();
 	started = cur_ops->get_gp_seq();
 	ts = rcu_trace_clock_local();
@@ -1177,7 +1178,6 @@ static void rcu_torture_timer(struct timer_list *unused)
 		atomic_inc(&n_rcu_torture_mberror);
 	spin_lock(&rand_lock);
 	cur_ops->read_delay(&rand);
-	n_rcu_torture_timers++;
 	spin_unlock(&rand_lock);
 	preempt_disable();
 	pipe_count = p->rtort_pipe_count;
@@ -1290,7 +1290,7 @@ rcu_torture_stats_print(void)
 	pr_cont("rtbf: %ld rtb: %ld nt: %ld ",
 		n_rcu_torture_boost_failure,
 		n_rcu_torture_boosts,
-		n_rcu_torture_timers);
+		atomic_long_read(&n_rcu_torture_timers));
 	torture_onoff_stats();
 	pr_cont("barrier: %ld/%ld:%ld ",
 		n_barrier_successes,