rcu/nocb: Use separate flag to indicate offloaded ->cblist
RCU callback processing currently uses rcu_is_nocb_cpu() to determine whether or not the current CPU's callbacks are to be offloaded. This works, but it is not so good for cache locality. Plus use of ->cblist for offloaded callbacks will greatly increase the frequency of these checks. This commit therefore adds a ->offloaded flag to the rcu_segcblist structure to provide a more flexible and cache-friendly means of checking for callback offloading.

Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
parent 1bb5f9b95a
commit ce5215c134
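The idea behind the patch can be illustrated outside the kernel. Below is a minimal, self-contained user-space sketch contrasting the old cpumask-style lookup with the new per-list ->offloaded flag; the mock_* names and simplified structures are illustrative assumptions, not kernel code.

/*
 * Minimal user-space sketch (not kernel code) contrasting the two checks.
 * All mock_* names and simplified structures are illustrative assumptions.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MOCK_NR_CPUS 8

/* Simplified stand-in for the segmented callback list. */
struct mock_segcblist {
	long len;
	uint8_t enabled;
	uint8_t offloaded;	/* the new flag lives next to the list it describes */
};

/* Simplified per-CPU data; the real rcu_data embeds a struct rcu_segcblist. */
struct mock_rcu_data {
	int cpu;
	struct mock_segcblist cblist;
};

/* Old style: consult a global per-CPU mask, separate from the list itself. */
static bool mock_nocb_mask[MOCK_NR_CPUS];

static bool mock_is_nocb_cpu(int cpu)
{
	return mock_nocb_mask[cpu];
}

/* New style: read a flag in the cblist the caller is already touching. */
static bool mock_segcblist_is_offloaded(struct mock_segcblist *rsclp)
{
	return rsclp->offloaded;
}

int main(void)
{
	struct mock_rcu_data rdp = {
		.cpu = 3,
		.cblist = { .len = 0, .enabled = 0, .offloaded = 1 },
	};

	mock_nocb_mask[rdp.cpu] = true;

	/* Both checks answer the same question for an offloaded CPU... */
	printf("mask-based check: %d\n", mock_is_nocb_cpu(rdp.cpu));
	/* ...but the flag-based check stays within the cblist itself. */
	printf("flag-based check: %d\n", mock_segcblist_is_offloaded(&rdp.cblist));
	return 0;
}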
@@ -71,6 +71,7 @@ struct rcu_segcblist {
 	long len;
 	long len_lazy;
 	u8 enabled;
+	u8 offloaded;
 };
 
 #define RCU_SEGCBLIST_INITIALIZER(n) \
@@ -73,6 +73,18 @@ void rcu_segcblist_disable(struct rcu_segcblist *rsclp)
 	rsclp->enabled = 0;
 }
 
+/*
+ * Mark the specified rcu_segcblist structure as offloaded. This
+ * structure must be empty.
+ */
+void rcu_segcblist_offload(struct rcu_segcblist *rsclp)
+{
+	WARN_ON_ONCE(!rcu_segcblist_empty(rsclp));
+	WARN_ON_ONCE(rcu_segcblist_n_cbs(rsclp));
+	WARN_ON_ONCE(rcu_segcblist_n_lazy_cbs(rsclp));
+	rsclp->offloaded = 1;
+}
+
 /*
  * Does the specified rcu_segcblist structure contain callbacks that
  * are ready to be invoked?
@@ -66,6 +66,12 @@ static inline bool rcu_segcblist_is_enabled(struct rcu_segcblist *rsclp)
 	return rsclp->enabled;
 }
 
+/* Is the specified rcu_segcblist offloaded? */
+static inline bool rcu_segcblist_is_offloaded(struct rcu_segcblist *rsclp)
+{
+	return rsclp->offloaded;
+}
+
 /*
  * Are all segments following the specified segment of the specified
  * rcu_segcblist structure empty of callbacks? (The specified
@@ -78,6 +84,7 @@ static inline bool rcu_segcblist_restempty(struct rcu_segcblist *rsclp, int seg)
 
 void rcu_segcblist_init(struct rcu_segcblist *rsclp);
 void rcu_segcblist_disable(struct rcu_segcblist *rsclp);
+void rcu_segcblist_offload(struct rcu_segcblist *rsclp);
 bool rcu_segcblist_ready_cbs(struct rcu_segcblist *rsclp);
 bool rcu_segcblist_pend_cbs(struct rcu_segcblist *rsclp);
 struct rcu_head *rcu_segcblist_first_cb(struct rcu_segcblist *rsclp);
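A second small sketch, again with illustrative mock_* names rather than the kernel implementation, shows the lifecycle these declarations support: a list is initialized, disabled for in-place processing, and only then marked offloaded while still empty, the same ordering init_nocb_callback_list() uses in the final hunk below.

#include <assert.h>
#include <stdio.h>

struct mock_segcblist {
	long len;
	unsigned char enabled;
	unsigned char offloaded;
};

static void mock_segcblist_init(struct mock_segcblist *rsclp)
{
	rsclp->len = 0;
	rsclp->enabled = 1;
	rsclp->offloaded = 0;
}

static void mock_segcblist_disable(struct mock_segcblist *rsclp)
{
	rsclp->enabled = 0;
}

/* Mirrors the precondition asserted by the new rcu_segcblist_offload():
 * only an empty list may be switched to offloaded mode. */
static void mock_segcblist_offload(struct mock_segcblist *rsclp)
{
	assert(rsclp->len == 0);
	rsclp->offloaded = 1;
}

int main(void)
{
	struct mock_segcblist cbl;

	mock_segcblist_init(&cbl);
	mock_segcblist_disable(&cbl);
	mock_segcblist_offload(&cbl);
	printf("enabled=%d offloaded=%d\n", cbl.enabled, cbl.offloaded);
	return 0;
}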
@@ -2858,10 +2858,11 @@ void rcu_barrier(void)
 	 * corresponding CPU's preceding callbacks have been invoked.
 	 */
 	for_each_possible_cpu(cpu) {
-		if (!cpu_online(cpu) && !rcu_is_nocb_cpu(cpu))
-			continue;
 		rdp = per_cpu_ptr(&rcu_data, cpu);
-		if (rcu_is_nocb_cpu(cpu)) {
+		if (!cpu_online(cpu) &&
+		    !rcu_segcblist_is_offloaded(&rdp->cblist))
+			continue;
+		if (rcu_segcblist_is_offloaded(&rdp->cblist)) {
 			if (!rcu_nocb_cpu_needs_barrier(cpu)) {
 				rcu_barrier_trace(TPS("OfflineNoCB"), cpu,
 						  rcu_state.barrier_sequence);
@@ -3155,7 +3156,8 @@ void rcutree_migrate_callbacks(int cpu)
 	struct rcu_node *rnp_root = rcu_get_root();
 	bool needwake;
 
-	if (rcu_is_nocb_cpu(cpu) || rcu_segcblist_empty(&rdp->cblist))
+	if (rcu_segcblist_is_offloaded(&rdp->cblist) ||
+	    rcu_segcblist_empty(&rdp->cblist))
 		return; /* No callbacks to migrate. */
 
 	local_irq_save(flags);
@@ -1382,7 +1382,7 @@ static void rcu_prepare_for_idle(void)
 	int tne;
 
 	lockdep_assert_irqs_disabled();
-	if (rcu_is_nocb_cpu(smp_processor_id()))
+	if (rcu_segcblist_is_offloaded(&rdp->cblist))
 		return;
 
 	/* Handle nohz enablement switches conservatively. */
@@ -1431,8 +1431,10 @@ static void rcu_prepare_for_idle(void)
  */
 static void rcu_cleanup_after_idle(void)
 {
+	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
+
 	lockdep_assert_irqs_disabled();
-	if (rcu_is_nocb_cpu(smp_processor_id()))
+	if (rcu_segcblist_is_offloaded(&rdp->cblist))
 		return;
 	if (rcu_try_advance_all_cbs())
 		invoke_rcu_core();
@@ -1694,7 +1696,7 @@ static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp,
 			    bool lazy, unsigned long flags)
 {
 
-	if (!rcu_is_nocb_cpu(rdp->cpu))
+	if (!rcu_segcblist_is_offloaded(&rdp->cblist))
 		return false;
 	__call_rcu_nocb_enqueue(rdp, rhp, &rhp->next, 1, lazy, flags);
 	if (__is_kfree_rcu_offset((unsigned long)rhp->func))
@@ -1729,7 +1731,7 @@ static bool __maybe_unused rcu_nocb_adopt_orphan_cbs(struct rcu_data *my_rdp,
 						     unsigned long flags)
 {
 	lockdep_assert_irqs_disabled();
-	if (!rcu_is_nocb_cpu(smp_processor_id()))
+	if (!rcu_segcblist_is_offloaded(&my_rdp->cblist))
 		return false; /* Not NOCBs CPU, caller must migrate CBs. */
 	__call_rcu_nocb_enqueue(my_rdp, rcu_segcblist_head(&rdp->cblist),
 				rcu_segcblist_tail(&rdp->cblist),
@@ -2192,6 +2194,7 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
 	}
 	rcu_segcblist_init(&rdp->cblist);
 	rcu_segcblist_disable(&rdp->cblist);
+	rcu_segcblist_offload(&rdp->cblist);
 	return true;
 }
 