Merge branches 'expedited.2018.07.12a', 'fixes.2018.07.12a', 'srcu.2018.06.25b' and 'torture.2018.06.25b' into HEAD

expedited.2018.07.12a: Expedited grace-period updates.
fixes.2018.07.12a: Pre-gp_seq miscellaneous fixes.
srcu.2018.06.25b: SRCU updates.
torture.2018.06.25b: Pre-gp_seq torture-test updates.
commit 609af1cdf0
@@ -195,6 +195,16 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
 	return retval;
 }
 
+/* Used by tracing, cannot be traced and cannot invoke lockdep. */
+static inline notrace int
+srcu_read_lock_notrace(struct srcu_struct *sp) __acquires(sp)
+{
+	int retval;
+
+	retval = __srcu_read_lock(sp);
+	return retval;
+}
+
 /**
  * srcu_read_unlock - unregister a old reader from an SRCU-protected structure.
  * @sp: srcu_struct in which to unregister the old reader.

@@ -209,6 +219,13 @@ static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
 	__srcu_read_unlock(sp, idx);
 }
 
+/* Used by tracing, cannot be traced and cannot call lockdep. */
+static inline notrace void
+srcu_read_unlock_notrace(struct srcu_struct *sp, int idx) __releases(sp)
+{
+	__srcu_read_unlock(sp, idx);
+}
+
 /**
  * smp_mb__after_srcu_read_unlock - ensure full ordering after srcu_read_unlock
  *
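The two notrace variants above mirror srcu_read_lock() and srcu_read_unlock() but skip the lockdep and tracing hooks, so tracing code can enter an SRCU read-side critical section without recursing into the tracer. A minimal usage sketch, not part of the patch (my_srcu and my_probe are hypothetical names):

	/* Sketch only: my_srcu and my_probe are hypothetical. */
	DEFINE_STATIC_SRCU(my_srcu);

	static notrace void my_probe(void *data)
	{
		int idx;

		/* srcu_read_lock() would call into lockdep, which may itself
		 * be traced; the notrace variant avoids that recursion. */
		idx = srcu_read_lock_notrace(&my_srcu);
		/* ... read SRCU-protected data ... */
		srcu_read_unlock_notrace(&my_srcu, idx);
	}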
@@ -79,7 +79,7 @@ void stutter_wait(const char *title);
 int torture_stutter_init(int s);
 
 /* Initialization and cleanup. */
-bool torture_init_begin(char *ttype, bool v);
+bool torture_init_begin(char *ttype, int v);
 void torture_init_end(void);
 bool torture_cleanup_begin(void);
 void torture_cleanup_end(void);
@@ -21,6 +21,9 @@
  *	Davidlohr Bueso <dave@stgolabs.net>
  *	Based on kernel/rcu/torture.c.
  */
+
+#define pr_fmt(fmt) fmt
+
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/kthread.h>

@@ -57,7 +60,7 @@ torture_param(int, shutdown_secs, 0, "Shutdown time (j), <= zero to disable.");
 torture_param(int, stat_interval, 60,
	      "Number of seconds between stats printk()s");
 torture_param(int, stutter, 5, "Number of jiffies to run/halt test, 0=disable");
-torture_param(bool, verbose, true,
+torture_param(int, verbose, 1,
	      "Enable verbose debugging printk()s");
 
 static char *torture_type = "spin_lock";
@@ -19,6 +19,9 @@
  *
  * Authors: Paul E. McKenney <paulmck@us.ibm.com>
  */
+
+#define pr_fmt(fmt) fmt
+
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/init.h>

@@ -88,7 +91,7 @@ torture_param(int, nreaders, -1, "Number of RCU reader threads");
 torture_param(int, nwriters, -1, "Number of RCU updater threads");
 torture_param(bool, shutdown, !IS_ENABLED(MODULE),
	      "Shutdown at end of performance tests.");
-torture_param(bool, verbose, true, "Enable verbose debugging printk()s");
+torture_param(int, verbose, 1, "Enable verbose debugging printk()s");
 torture_param(int, writer_holdoff, 0, "Holdoff (us) between GPs, zero to disable");
 
 static char *perf_type = "rcu";
@@ -22,6 +22,9 @@
  *
  * See also:  Documentation/RCU/torture.txt
  */
+
+#define pr_fmt(fmt) fmt
+
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/init.h>

@@ -101,7 +104,7 @@ torture_param(int, test_boost_interval, 7,
	      "Interval between boost tests, seconds.");
 torture_param(bool, test_no_idle_hz, true,
	      "Test support for tickless idle CPUs");
-torture_param(bool, verbose, true,
+torture_param(int, verbose, 1,
	      "Enable verbose debugging printk()s");
 
 static char *torture_type = "rcu";
@@ -641,6 +641,9 @@ static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
  * period s.  Losers must either ensure that their desired grace-period
  * number is recorded on at least their leaf srcu_node structure, or they
  * must take steps to invoke their own callbacks.
+ *
+ * Note that this function also does the work of srcu_funnel_exp_start(),
+ * in some cases by directly invoking it.
  */
 static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
 				 unsigned long s, bool do_norm)

@@ -823,17 +826,17 @@ static void srcu_leak_callback(struct rcu_head *rhp)
  * more than one CPU, this means that when "func()" is invoked, each CPU
  * is guaranteed to have executed a full memory barrier since the end of
  * its last corresponding SRCU read-side critical section whose beginning
- * preceded the call to call_rcu().  It also means that each CPU executing
+ * preceded the call to call_srcu().  It also means that each CPU executing
  * an SRCU read-side critical section that continues beyond the start of
- * "func()" must have executed a memory barrier after the call_rcu()
+ * "func()" must have executed a memory barrier after the call_srcu()
  * but before the beginning of that SRCU read-side critical section.
  * Note that these guarantees include CPUs that are offline, idle, or
  * executing in user mode, as well as CPUs that are executing in the kernel.
  *
- * Furthermore, if CPU A invoked call_rcu() and CPU B invoked the
+ * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
  * resulting SRCU callback function "func()", then both CPU A and CPU
  * B are guaranteed to execute a full memory barrier during the time
- * interval between the call to call_rcu() and the invocation of "func()".
+ * interval between the call to call_srcu() and the invocation of "func()".
  * This guarantee applies even if CPU A and CPU B are the same CPU (but
  * again only if the system has more than one CPU).
  *
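The comment fix above matters because call_srcu(), not call_rcu(), is what provides these ordering guarantees relative to SRCU read-side critical sections on the same srcu_struct. For reference, a minimal call_srcu() sketch (the structure and names are hypothetical, not part of this change):

	/* Sketch only: hypothetical object retired via SRCU. */
	struct my_obj {
		struct rcu_head rh;
		/* ... payload ... */
	};

	DEFINE_STATIC_SRCU(my_srcu);

	static void my_obj_free(struct rcu_head *rhp)
	{
		kfree(container_of(rhp, struct my_obj, rh));
	}

	static void my_obj_retire(struct my_obj *p)
	{
		/* my_obj_free() runs only after all pre-existing
		 * my_srcu readers have finished. */
		call_srcu(&my_srcu, &p->rh, my_obj_free);
	}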
@@ -1268,11 +1271,11 @@ void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf)
 		unsigned long l0, l1;
 		unsigned long u0, u1;
 		long c0, c1;
-		struct srcu_data *counts;
+		struct srcu_data *sdp;
 
-		counts = per_cpu_ptr(sp->sda, cpu);
-		u0 = counts->srcu_unlock_count[!idx];
-		u1 = counts->srcu_unlock_count[idx];
+		sdp = per_cpu_ptr(sp->sda, cpu);
+		u0 = sdp->srcu_unlock_count[!idx];
+		u1 = sdp->srcu_unlock_count[idx];
 
 		/*
 		 * Make sure that a lock is always counted if the corresponding

@@ -1280,12 +1283,13 @@ void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf)
 		 */
 		smp_rmb();
 
-		l0 = counts->srcu_lock_count[!idx];
-		l1 = counts->srcu_lock_count[idx];
+		l0 = sdp->srcu_lock_count[!idx];
+		l1 = sdp->srcu_lock_count[idx];
 
 		c0 = l0 - u0;
 		c1 = l1 - u1;
-		pr_cont(" %d(%ld,%ld)", cpu, c0, c1);
+		pr_cont(" %d(%ld,%ld %1p)",
+			cpu, c0, c1, rcu_segcblist_head(&sdp->srcu_cblist));
 		s0 += c0;
 		s1 += c1;
 	}
@@ -1368,7 +1368,6 @@ static inline void panic_on_rcu_stall(void)
 static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
 {
 	int cpu;
-	long delta;
 	unsigned long flags;
 	unsigned long gpa;
 	unsigned long j;

@@ -1381,18 +1380,6 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
 	if (rcu_cpu_stall_suppress)
 		return;
 
-	/* Only let one CPU complain about others per time interval. */
-
-	raw_spin_lock_irqsave_rcu_node(rnp, flags);
-	delta = jiffies - READ_ONCE(rsp->jiffies_stall);
-	if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
-		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-		return;
-	}
-	WRITE_ONCE(rsp->jiffies_stall,
-		   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
-	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-
 	/*
 	 * OK, time to rat on our buddy...
 	 * See Documentation/RCU/stallwarn.txt for info on how to debug

@@ -1441,6 +1428,10 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
 			sched_show_task(current);
 		}
 	}
+	/* Rewrite if needed in case of slow consoles. */
+	if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
+		WRITE_ONCE(rsp->jiffies_stall,
+			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
 
 	rcu_check_gp_kthread_starvation(rsp);
 

@@ -1485,6 +1476,7 @@ static void print_cpu_stall(struct rcu_state *rsp)
 	rcu_dump_cpu_stacks(rsp);
 
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
+	/* Rewrite if needed in case of slow consoles. */
 	if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
 		WRITE_ONCE(rsp->jiffies_stall,
 			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);

@@ -1508,6 +1500,7 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
 	unsigned long gpnum;
 	unsigned long gps;
 	unsigned long j;
+	unsigned long jn;
 	unsigned long js;
 	struct rcu_node *rnp;
 

@@ -1546,14 +1539,17 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
 	    ULONG_CMP_GE(gps, js))
 		return; /* No stall or GP completed since entering function. */
 	rnp = rdp->mynode;
+	jn = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
 	if (rcu_gp_in_progress(rsp) &&
-	    (READ_ONCE(rnp->qsmask) & rdp->grpmask)) {
+	    (READ_ONCE(rnp->qsmask) & rdp->grpmask) &&
+	    cmpxchg(&rsp->jiffies_stall, js, jn) == js) {
 
 		/* We haven't checked in, so go dump stack. */
 		print_cpu_stall(rsp);
 
 	} else if (rcu_gp_in_progress(rsp) &&
-		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY)) {
+		   ULONG_CMP_GE(j, js + RCU_STALL_RAT_DELAY) &&
+		   cmpxchg(&rsp->jiffies_stall, js, jn) == js) {
 
 		/* They had a few time units to dump stack, so complain. */
 		print_other_cpu_stall(rsp, gpnum);
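The two cmpxchg() calls added above serve double duty: they push ->jiffies_stall forward so the next check is deferred, and they elect exactly one reporter, since only the CPU that observes the old value js wins the exchange. The pattern in isolation looks like this (a sketch with hypothetical names, not the kernel code itself):

	/* Sketch: single-reporter election via cmpxchg(). */
	static unsigned long stall_deadline;	/* plays the role of ->jiffies_stall */

	static void maybe_report_stall(unsigned long js, unsigned long jn)
	{
		/* Many CPUs may race here; exactly one sees the old value js. */
		if (cmpxchg(&stall_deadline, js, jn) == js)
			pr_err("stall: this CPU won the right to complain\n");
		/* Losers observe an updated deadline and simply return. */
	}

This election is also why the lock-based interval check could be removed from print_other_cpu_stall() in the earlier hunk: by the time that function runs, a single caller has already been chosen.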
@@ -1685,6 +1681,7 @@ static bool rcu_start_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
 	}
 	trace_rcu_this_gp(rnp_root, rdp, c, TPS("Startedroot"));
 	WRITE_ONCE(rsp->gp_flags, rsp->gp_flags | RCU_GP_FLAG_INIT);
+	rsp->gp_req_activity = jiffies;
 	if (!rsp->gp_kthread) {
 		trace_rcu_this_gp(rnp_root, rdp, c, TPS("NoGPkthread"));
 		goto unlock_out;

@@ -2084,7 +2081,8 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 	 */
 	rcu_for_each_node_breadth_first(rsp, rnp) {
 		raw_spin_lock_irq_rcu_node(rnp);
-		WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
+		if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
+			dump_blkd_tasks(rnp, 10);
 		WARN_ON_ONCE(rnp->qsmask);
 		WRITE_ONCE(rnp->completed, rsp->gpnum);
 		rdp = this_cpu_ptr(rsp->rda);

@@ -2116,10 +2114,12 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 	/* Advance CBs to reduce false positives below. */
 	if (!rcu_accelerate_cbs(rsp, rnp, rdp) && needgp) {
 		WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
+		rsp->gp_req_activity = jiffies;
 		trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gpnum),
 				       TPS("newreq"));
+	} else {
+		WRITE_ONCE(rsp->gp_flags, rsp->gp_flags & RCU_GP_FLAG_INIT);
 	}
-	WRITE_ONCE(rsp->gp_flags, rsp->gp_flags & RCU_GP_FLAG_INIT);
 	raw_spin_unlock_irq_rcu_node(rnp);
 }
 

@@ -2294,6 +2294,7 @@ rcu_report_qs_rnp(unsigned long mask, struct rcu_state *rsp,
 			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 			return;
 		}
+		rnp->completedqs = rnp->gpnum;
 		mask = rnp->grpmask;
 		if (rnp->parent == NULL) {
 

@@ -2746,6 +2747,65 @@ static void force_quiescent_state(struct rcu_state *rsp)
 	rcu_gp_kthread_wake(rsp);
 }
 
+/*
+ * This function checks for grace-period requests that fail to motivate
+ * RCU to come out of its idle mode.
+ */
+static void
+rcu_check_gp_start_stall(struct rcu_state *rsp, struct rcu_node *rnp,
+			 struct rcu_data *rdp)
+{
+	unsigned long flags;
+	unsigned long j;
+	struct rcu_node *rnp_root = rcu_get_root(rsp);
+	static atomic_t warned = ATOMIC_INIT(0);
+
+	if (!IS_ENABLED(CONFIG_PROVE_RCU) ||
+	    rcu_gp_in_progress(rsp) || !need_any_future_gp(rcu_get_root(rsp)))
+		return;
+	j = jiffies; /* Expensive access, and in common case don't get here. */
+	if (time_before(j, READ_ONCE(rsp->gp_req_activity) + HZ) ||
+	    time_before(j, READ_ONCE(rsp->gp_activity) + HZ) ||
+	    atomic_read(&warned))
+		return;
+
+	raw_spin_lock_irqsave_rcu_node(rnp, flags);
+	j = jiffies;
+	if (rcu_gp_in_progress(rsp) || !need_any_future_gp(rcu_get_root(rsp)) ||
+	    time_before(j, READ_ONCE(rsp->gp_req_activity) + HZ) ||
+	    time_before(j, READ_ONCE(rsp->gp_activity) + HZ) ||
+	    atomic_read(&warned)) {
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+		return;
+	}
+	/* Hold onto the leaf lock to make others see warned==1. */
+
+	if (rnp_root != rnp)
+		raw_spin_lock_rcu_node(rnp_root); /* irqs already disabled. */
+	j = jiffies;
+	if (rcu_gp_in_progress(rsp) || !need_any_future_gp(rcu_get_root(rsp)) ||
+	    time_before(j, rsp->gp_req_activity + HZ) ||
+	    time_before(j, rsp->gp_activity + HZ) ||
+	    atomic_xchg(&warned, 1)) {
+		raw_spin_unlock_rcu_node(rnp_root); /* irqs remain disabled. */
+		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+		return;
+	}
+	pr_alert("%s: g%lu %d%d%d%d gar:%lu ga:%lu f%#x %s->state:%#lx\n",
+		 __func__, READ_ONCE(rsp->gpnum),
+		 need_future_gp_element(rcu_get_root(rsp), 0),
+		 need_future_gp_element(rcu_get_root(rsp), 1),
+		 need_future_gp_element(rcu_get_root(rsp), 2),
+		 need_future_gp_element(rcu_get_root(rsp), 3),
+		 j - rsp->gp_req_activity, j - rsp->gp_activity,
+		 rsp->gp_flags, rsp->name,
+		 rsp->gp_kthread ? rsp->gp_kthread->state : 0x1ffffL);
+	WARN_ON(1);
+	if (rnp_root != rnp)
+		raw_spin_unlock_rcu_node(rnp_root);
+	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+}
+
 /*
  * This does the RCU core processing work for the specified rcu_state
  * and rcu_data structures.  This may be called only from the CPU to
@@ -2757,7 +2817,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
 	unsigned long flags;
 	bool needwake;
 	struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
-	struct rcu_node *rnp;
+	struct rcu_node *rnp = rdp->mynode;
 
 	WARN_ON_ONCE(!rdp->beenonline);
 

@@ -2771,7 +2831,6 @@ __rcu_process_callbacks(struct rcu_state *rsp)
 	if (rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) {
 		local_irq_restore(flags);
 	} else {
-		rnp = rdp->mynode;
 		raw_spin_lock_rcu_node(rnp); /* irqs disabled. */
 		needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);

@@ -2780,6 +2839,8 @@ __rcu_process_callbacks(struct rcu_state *rsp)
 		}
 	}
 
+	rcu_check_gp_start_stall(rsp, rnp, rdp);
+
 	/* If there are callbacks ready, invoke them. */
 	if (rcu_segcblist_ready_cbs(&rdp->cblist))
 		invoke_rcu_callbacks(rsp, rdp);

@@ -3930,6 +3991,7 @@ static void __init rcu_init_one(struct rcu_state *rsp)
 					   &rcu_fqs_class[i], fqs[i]);
 		rnp->gpnum = rsp->gpnum;
 		rnp->completed = rsp->completed;
+		rnp->completedqs = rsp->completed;
 		rnp->qsmask = 0;
 		rnp->qsmaskinit = 0;
 		rnp->grplo = j * cpustride;
@@ -87,6 +87,7 @@ struct rcu_node {
 	unsigned long completed;	/* Last GP completed for this node. */
 					/*  This will either be equal to or one */
 					/*  behind the root rcu_node's gpnum. */
+	unsigned long completedqs;	/* All QSes done for this node. */
 	unsigned long qsmask;	/* CPUs or groups that need to switch in */
 				/*  order for current grace period to proceed.*/
 				/*  In leaf rcu_node, each bit corresponds to */

@@ -373,6 +374,8 @@ struct rcu_state {
 						/*  but in jiffies. */
 	unsigned long gp_activity;		/* Time of last GP kthread */
 						/*  activity in jiffies. */
+	unsigned long gp_req_activity;		/* Time of last GP request */
+						/*  in jiffies. */
 	unsigned long jiffies_stall;		/* Time at which to check */
 						/*  for CPU stalls. */
 	unsigned long jiffies_resched;		/* Time at which to resched */

@@ -453,6 +456,7 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
 static void rcu_preempt_check_callbacks(void);
 void call_rcu(struct rcu_head *head, rcu_callback_t func);
 static void __init __rcu_init_preempt(void);
+static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck);
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
 static void invoke_rcu_callbacks_kthread(void);
@@ -260,8 +260,10 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
 	 * ->exp_tasks pointers, respectively, to reference the newly
 	 * blocked tasks.
 	 */
-	if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD))
+	if (!rnp->gp_tasks && (blkd_state & RCU_GP_BLKD)) {
 		rnp->gp_tasks = &t->rcu_node_entry;
+		WARN_ON_ONCE(rnp->completedqs == rnp->gpnum);
+	}
 	if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
 		rnp->exp_tasks = &t->rcu_node_entry;
 	WARN_ON_ONCE(!(blkd_state & RCU_GP_BLKD) !=

@@ -535,6 +537,8 @@ void rcu_read_unlock_special(struct task_struct *t)
 		WARN_ON_ONCE(rnp != t->rcu_blocked_node);
 		WARN_ON_ONCE(!rcu_is_leaf_node(rnp));
 		empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
+		WARN_ON_ONCE(rnp->completedqs == rnp->gpnum &&
+			     (!empty_norm || rnp->qsmask));
 		empty_exp = sync_rcu_preempt_exp_done(rnp);
 		smp_mb(); /* ensure expedited fastpath sees end of RCU c-s. */
 		np = rcu_next_node_entry(t, rnp);

@@ -697,7 +701,8 @@ static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp)
 	struct task_struct *t;
 
 	RCU_LOCKDEP_WARN(preemptible(), "rcu_preempt_check_blocked_tasks() invoked with preemption enabled!!!\n");
-	WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
+	if (WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)))
+		dump_blkd_tasks(rnp, 10);
 	if (rcu_preempt_has_tasks(rnp)) {
 		rnp->gp_tasks = rnp->blkd_tasks.next;
 		t = container_of(rnp->gp_tasks, struct task_struct,

@@ -841,6 +846,27 @@ void exit_rcu(void)
 	__rcu_read_unlock();
 }
 
+/*
+ * Dump the blocked-tasks state, but limit the list dump to the
+ * specified number of elements.
+ */
+static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
+{
+	int i;
+	struct list_head *lhp;
+
+	raw_lockdep_assert_held_rcu_node(rnp);
+	pr_info("%s: grp: %d-%d level: %d ->qamask %#lx ->gp_tasks %p ->boost_tasks %p ->exp_tasks %p &->blkd_tasks: %p offset: %u\n", __func__, rnp->grplo, rnp->grphi, rnp->level, rnp->qsmask, rnp->gp_tasks, rnp->boost_tasks, rnp->exp_tasks, &rnp->blkd_tasks, (unsigned int)offsetof(typeof(*rnp), blkd_tasks));
+	pr_cont("\t->blkd_tasks");
+	i = 0;
+	list_for_each(lhp, &rnp->blkd_tasks) {
+		pr_cont(" %p", lhp);
+		if (++i >= 10)
+			break;
+	}
+	pr_cont("\n");
+}
+
 #else /* #ifdef CONFIG_PREEMPT_RCU */
 
 static struct rcu_state *const rcu_state_p = &rcu_sched_state;

@@ -949,6 +975,14 @@ void exit_rcu(void)
 {
 }
 
+/*
+ * Dump the guaranteed-empty blocked-tasks state.  Trust but verify.
+ */
+static void dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
+{
+	WARN_ON_ONCE(!list_empty(&rnp->blkd_tasks));
+}
+
 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
 
 #ifdef CONFIG_RCU_BOOST
@@ -20,6 +20,9 @@
  * Author: Paul E. McKenney <paulmck@us.ibm.com>
  *	Based on kernel/rcu/torture.c.
  */
+
+#define pr_fmt(fmt) fmt
+
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/init.h>

@@ -53,7 +56,7 @@ MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com>");
 
 static char *torture_type;
-static bool verbose;
+static int verbose;
 
 /* Mediate rmmod and system shutdown.  Concurrent rmmod & shutdown illegal! */
 #define FULLSTOP_DONTSTOP 0	/* Normal operation. */

@@ -98,7 +101,7 @@ bool torture_offline(int cpu, long *n_offl_attempts, long *n_offl_successes,
 	if (!cpu_online(cpu) || !cpu_is_hotpluggable(cpu))
 		return false;
 
-	if (verbose)
+	if (verbose > 1)
 		pr_alert("%s" TORTURE_FLAG
 			 "torture_onoff task: offlining %d\n",
 			 torture_type, cpu);

@@ -111,7 +114,7 @@ bool torture_offline(int cpu, long *n_offl_attempts, long *n_offl_successes,
 			 "torture_onoff task: offline %d failed: errno %d\n",
 			 torture_type, cpu, ret);
 	} else {
-		if (verbose)
+		if (verbose > 1)
 			pr_alert("%s" TORTURE_FLAG
 				 "torture_onoff task: offlined %d\n",
 				 torture_type, cpu);

@@ -147,7 +150,7 @@ bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes,
 	if (cpu_online(cpu) || !cpu_is_hotpluggable(cpu))
 		return false;
 
-	if (verbose)
+	if (verbose > 1)
 		pr_alert("%s" TORTURE_FLAG
 			 "torture_onoff task: onlining %d\n",
 			 torture_type, cpu);

@@ -160,7 +163,7 @@ bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes,
 			 "torture_onoff task: online %d failed: errno %d\n",
 			 torture_type, cpu, ret);
 	} else {
-		if (verbose)
+		if (verbose > 1)
 			pr_alert("%s" TORTURE_FLAG
 				 "torture_onoff task: onlined %d\n",
 				 torture_type, cpu);

@@ -647,7 +650,7 @@ static void torture_stutter_cleanup(void)
  * The runnable parameter points to a flag that controls whether or not
  * the test is currently runnable.  If there is no such flag, pass in NULL.
  */
-bool torture_init_begin(char *ttype, bool v)
+bool torture_init_begin(char *ttype, int v)
 {
 	mutex_lock(&fullstop_mutex);
 	if (torture_type != NULL) {
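With verbose now an int throughout the torture suite, the level is graded rather than on/off: 0 is quiet, 1 keeps the traditional messages, and, as the torture_offline()/torture_online() hunks above show, only verbose > 1 enables the per-CPU hotplug chatter. Callers pass their module parameter straight through, roughly like this (a sketch; the surrounding module and names are hypothetical):

	/* Sketch: a torture module handing its graded verbose level down. */
	torture_param(int, verbose, 1, "Enable verbose debugging printk()s");

	static char *my_torture_type = "my_torture";

	static int __init my_torture_init(void)
	{
		if (!torture_init_begin(my_torture_type, verbose))
			return -EBUSY;
		/* ... create torture kthreads ... */
		torture_init_end();
		return 0;
	}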
@@ -1,6 +1,6 @@
 #!/bin/bash
 #
-# Usage: configinit.sh config-spec-file [ build output dir ]
+# Usage: configinit.sh config-spec-file build-output-dir results-dir
 #
 # Create a .config file from the spec file.  Run from the kernel source tree.
 # Exits with 0 if all went well, with 1 if all went well but the config

@@ -40,20 +40,18 @@ mkdir $T
 
 c=$1
 buildloc=$2
+resdir=$3
 builddir=
-if test -n $buildloc
+if echo $buildloc | grep -q '^O='
 then
-	if echo $buildloc | grep -q '^O='
+	builddir=`echo $buildloc | sed -e 's/^O=//'`
+	if test ! -d $builddir
 	then
-		builddir=`echo $buildloc | sed -e 's/^O=//'`
-		if test ! -d $builddir
-		then
-			mkdir $builddir
-		fi
-	else
-		echo Bad build directory: \"$buildloc\"
-		exit 2
+		mkdir $builddir
 	fi
+else
+	echo Bad build directory: \"$buildloc\"
+	exit 2
 fi
-fi
 
 sed -e 's/^\(CONFIG[0-9A-Z_]*\)=.*$/grep -v "^# \1" |/' < $c > $T/u.sh

@@ -61,12 +59,12 @@ sed -e 's/^\(CONFIG[0-9A-Z_]*=\).*$/grep -v \1 |/' < $c >> $T/u.sh
 grep '^grep' < $T/u.sh > $T/upd.sh
 echo "cat - $c" >> $T/upd.sh
 make mrproper
-make $buildloc distclean > $builddir/Make.distclean 2>&1
-make $buildloc $TORTURE_DEFCONFIG > $builddir/Make.defconfig.out 2>&1
+make $buildloc distclean > $resdir/Make.distclean 2>&1
+make $buildloc $TORTURE_DEFCONFIG > $resdir/Make.defconfig.out 2>&1
 mv $builddir/.config $builddir/.config.sav
 sh $T/upd.sh < $builddir/.config.sav > $builddir/.config
 cp $builddir/.config $builddir/.config.new
-yes '' | make $buildloc oldconfig > $builddir/Make.oldconfig.out 2> $builddir/Make.oldconfig.err
+yes '' | make $buildloc oldconfig > $resdir/Make.oldconfig.out 2> $resdir/Make.oldconfig.err
 
 # verify new config matches specification.
 configcheck.sh $builddir/.config $c
@@ -2,7 +2,7 @@
 #
 # Build a kvm-ready Linux kernel from the tree in the current directory.
 #
-# Usage: kvm-build.sh config-template build-dir
+# Usage: kvm-build.sh config-template build-dir resdir
 #
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by

@@ -29,6 +29,7 @@ then
 	exit 1
 fi
 builddir=${2}
+resdir=${3}
 
 T=${TMPDIR-/tmp}/test-linux.sh.$$
 trap 'rm -rf $T' 0

@@ -41,19 +42,19 @@ CONFIG_VIRTIO_PCI=y
 CONFIG_VIRTIO_CONSOLE=y
 ___EOF___
 
-configinit.sh $T/config O=$builddir
+configinit.sh $T/config O=$builddir $resdir
 retval=$?
 if test $retval -gt 1
 then
 	exit 2
 fi
 ncpus=`cpus2use.sh`
-make O=$builddir -j$ncpus $TORTURE_KMAKE_ARG > $builddir/Make.out 2>&1
+make O=$builddir -j$ncpus $TORTURE_KMAKE_ARG > $resdir/Make.out 2>&1
 retval=$?
-if test $retval -ne 0 || grep "rcu[^/]*": < $builddir/Make.out | egrep -q "Stop|Error|error:|warning:" || egrep -q "Stop|Error|error:" < $builddir/Make.out
+if test $retval -ne 0 || grep "rcu[^/]*": < $resdir/Make.out | egrep -q "Stop|Error|error:|warning:" || egrep -q "Stop|Error|error:" < $resdir/Make.out
 then
 	echo Kernel build error
-	egrep "Stop|Error|error:|warning:" < $builddir/Make.out
+	egrep "Stop|Error|error:|warning:" < $resdir/Make.out
 	echo Run aborted.
 	exit 3
 fi
@@ -70,4 +70,5 @@ else
 	else
 		print_warning $nclosecalls "Reader Batch close calls in" $(($dur/60)) minute run: $i
 	fi
+	echo $nclosecalls "Reader Batch close calls in" $(($dur/60)) minute run: $i > $i/console.log.rcu.diags
 fi
@@ -39,6 +39,7 @@ do
 			head -1 $resdir/log
 		fi
 		TORTURE_SUITE="`cat $i/../TORTURE_SUITE`"
+		rm -f $i/console.log.*.diags
 		kvm-recheck-${TORTURE_SUITE}.sh $i
 		if test -f "$i/console.log"
 		then
@@ -98,14 +98,15 @@ then
 	ln -s $base_resdir/.config $resdir  # for kvm-recheck.sh
-elif kvm-build.sh $T/Kc2 $builddir
+	# Arch-independent indicator
+	touch $resdir/builtkernel
+elif kvm-build.sh $T/Kc2 $builddir $resdir
 then
 	# Had to build a kernel for this test.
 	QEMU="`identify_qemu $builddir/vmlinux`"
 	BOOT_IMAGE="`identify_boot_image $QEMU`"
-	cp $builddir/Make*.out $resdir
 	cp $builddir/vmlinux $resdir
 	cp $builddir/.config $resdir
 	cp $builddir/Module.symvers $resdir > /dev/null || :
 	cp $builddir/System.map $resdir > /dev/null || :
 	if test -n "$BOOT_IMAGE"
 	then
 		cp $builddir/$BOOT_IMAGE $resdir
@@ -347,7 +347,7 @@ function dump(first, pastlast, batchnum)
 	print "needqemurun="
 	jn=1
 	for (j = first; j < pastlast; j++) {
-		builddir=KVM "/b" jn
+		builddir=KVM "/b1"
 		cpusr[jn] = cpus[j];
 		if (cfrep[cf[j]] == "") {
 			cfr[jn] = cf[j];
@@ -163,6 +163,13 @@ then
 		print_warning Summary: $summary
 		cat $T.diags >> $file.diags
 	fi
+	for i in $file.*.diags
+	do
+		if test -f "$i"
+		then
+			cat $i >> $file.diags
+		fi
+	done
 	if ! test -s $file.diags
 	then
 		rm -f $file.diags
@@ -1 +0,0 @@
-rcutree.rcu_fanout_exact=1