locking/percpu-rwsem: Make use of the rcu_sync infrastructure
Currently down_write/up_write calls synchronize_sched_expedited() twice,
which is evil. Change this code to rely on the rcu-sync primitives. This
avoids the _expedited "big hammer", and it can be faster in the contended
case, or even when a single thread does down_write/up_write in a loop.

Of course, a single down_write() will take more time, but on the other
hand it will be much more friendly to the whole system.

To simplify the review, this patch doesn't update the comments; that is
fixed by the next change.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
This commit is contained in:
parent 95b19f684c
commit 001dac627f
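The patch replaces the hand-rolled write_ctr bookkeeping with an rcu_sync object. For orientation, the rcu-sync calls the patch relies on combine as follows — a minimal usage sketch assembled from the calls visible in the diff below, not the full rcu_sync interface:

    struct rcu_sync rss;

    rcu_sync_init(&rss, RCU_SCHED_SYNC);    /* one-time setup, sched-RCU flavor */

    /* reader fast path: true when no writer-side grace period is pending */
    if (rcu_sync_is_idle(&rss)) {
            /* ... lockless per-cpu work ... */
    }

    rcu_sync_enter(&rss);                   /* writer: force readers onto the slow path */
    /* ... writer-exclusive section ... */
    rcu_sync_exit(&rss);                    /* writer: let readers back onto the fast path */

    rcu_sync_dtor(&rss);                    /* teardown */

The point of the change: rcu_sync_enter() pays for at most one (non-expedited) grace period per writer burst, instead of the two expedited grace periods the old code issued on every down_write/up_write pair.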
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -5,11 +5,12 @@
 #include <linux/rwsem.h>
 #include <linux/percpu.h>
 #include <linux/wait.h>
+#include <linux/rcu_sync.h>
 #include <linux/lockdep.h>
 
 struct percpu_rw_semaphore {
+	struct rcu_sync		rss;
 	unsigned int __percpu	*fast_read_ctr;
-	atomic_t		write_ctr;
 	struct rw_semaphore	rw_sem;
 	atomic_t		slow_read_ctr;
 	wait_queue_head_t	write_waitq;
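For reference, the structure after this hunk (reconstructed from the context lines above) reads:

    struct percpu_rw_semaphore {
            struct rcu_sync          rss;            /* replaces write_ctr */
            unsigned int __percpu   *fast_read_ctr;
            struct rw_semaphore      rw_sem;
            atomic_t                 slow_read_ctr;
            wait_queue_head_t        write_waitq;
    };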
--- a/kernel/locking/percpu-rwsem.c
+++ b/kernel/locking/percpu-rwsem.c
@@ -17,7 +17,7 @@ int __percpu_init_rwsem(struct percpu_rw_semaphore *brw,
 
 	/* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
 	__init_rwsem(&brw->rw_sem, name, rwsem_key);
-	atomic_set(&brw->write_ctr, 0);
+	rcu_sync_init(&brw->rss, RCU_SCHED_SYNC);
 	atomic_set(&brw->slow_read_ctr, 0);
 	init_waitqueue_head(&brw->write_waitq);
 	return 0;
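Assembled, the init path now sets up the rcu_sync object where it used to zero write_ctr. A sketch of the resulting function — the parameter names beyond brw are inferred from the __init_rwsem() call, and the fast_read_ctr allocation precedes the shown context, so it is elided:

    int __percpu_init_rwsem(struct percpu_rw_semaphore *brw,
                            const char *name, struct lock_class_key *rwsem_key)
    {
            /* ... allocation of brw->fast_read_ctr elided (precedes this hunk) ... */

            /* ->rw_sem represents the whole percpu_rw_semaphore for lockdep */
            __init_rwsem(&brw->rw_sem, name, rwsem_key);
            rcu_sync_init(&brw->rss, RCU_SCHED_SYNC);
            atomic_set(&brw->slow_read_ctr, 0);
            init_waitqueue_head(&brw->write_waitq);
            return 0;
    }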
@@ -33,6 +33,7 @@ void percpu_free_rwsem(struct percpu_rw_semaphore *brw)
 	if (!brw->fast_read_ctr)
 		return;
 
+	rcu_sync_dtor(&brw->rss);
 	free_percpu(brw->fast_read_ctr);
 	brw->fast_read_ctr = NULL; /* catch use after free bugs */
 }
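The destructor pairs rcu_sync_dtor() with the rcu_sync_init() above; the whole resulting function is visible in the hunk:

    void percpu_free_rwsem(struct percpu_rw_semaphore *brw)
    {
            if (!brw->fast_read_ctr)
                    return;

            rcu_sync_dtor(&brw->rss);
            free_percpu(brw->fast_read_ctr);
            brw->fast_read_ctr = NULL; /* catch use after free bugs */
    }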
@@ -62,13 +63,12 @@ void percpu_free_rwsem(struct percpu_rw_semaphore *brw)
  */
 static bool update_fast_ctr(struct percpu_rw_semaphore *brw, unsigned int val)
 {
-	bool success = false;
+	bool success;
 
 	preempt_disable();
-	if (likely(!atomic_read(&brw->write_ctr))) {
+	success = rcu_sync_is_idle(&brw->rss);
+	if (likely(success))
 		__this_cpu_add(*brw->fast_read_ctr, val);
-		success = true;
-	}
 	preempt_enable();
 
 	return success;
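The reader fast path now asks rcu_sync whether a writer is active instead of reading write_ctr; the function after this hunk reads:

    static bool update_fast_ctr(struct percpu_rw_semaphore *brw, unsigned int val)
    {
            bool success;

            preempt_disable();
            /* idle means no writer: safe to bump the per-cpu counter locklessly */
            success = rcu_sync_is_idle(&brw->rss);
            if (likely(success))
                    __this_cpu_add(*brw->fast_read_ctr, val);
            preempt_enable();

            return success;
    }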
@@ -149,8 +149,6 @@ static int clear_fast_ctr(struct percpu_rw_semaphore *brw)
  */
 void percpu_down_write(struct percpu_rw_semaphore *brw)
 {
-	/* tell update_fast_ctr() there is a pending writer */
-	atomic_inc(&brw->write_ctr);
 	/*
 	 * 1. Ensures that write_ctr != 0 is visible to any down_read/up_read
 	 *    so that update_fast_ctr() can't succeed.
@@ -162,7 +160,7 @@ void percpu_down_write(struct percpu_rw_semaphore *brw)
 	 * fast-path, it executes a full memory barrier before we return.
 	 *    See R_W case in the comment above update_fast_ctr().
 	 */
-	synchronize_sched_expedited();
+	rcu_sync_enter(&brw->rss);
 
 	/* exclude other writers, and block the new readers completely */
 	down_write(&brw->rw_sem);
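Taken together, the two hunks above reduce the writer entry path to a single rcu_sync_enter(). A sketch of the resulting function — the stale comment block (left in place on purpose, per the commit message) and the unchanged tail that drains the per-cpu counts and waits out slow-path readers are elided:

    void percpu_down_write(struct percpu_rw_semaphore *brw)
    {
            /* force readers off the fast path; replaces the write_ctr
               increment and the expedited grace period */
            rcu_sync_enter(&brw->rss);

            /* exclude other writers, and block the new readers completely */
            down_write(&brw->rw_sem);

            /* ... unchanged remainder elided ... */
    }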
@@ -183,8 +181,6 @@ void percpu_up_write(struct percpu_rw_semaphore *brw)
 	 * Insert the barrier before the next fast-path in down_read,
 	 *    see W_R case in the comment above update_fast_ctr().
 	 */
-	synchronize_sched_expedited();
-	/* the last writer unblocks update_fast_ctr() */
-	atomic_dec(&brw->write_ctr);
+	rcu_sync_exit(&brw->rss);
 }
 EXPORT_SYMBOL_GPL(percpu_up_write);
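The writer exit path likewise drops its expedited grace period. Reconstructed from the hunk; the up_write() call precedes the shown context and is an assumption here:

    void percpu_up_write(struct percpu_rw_semaphore *brw)
    {
            /* assumed: release the underlying rwsem before re-enabling the fast path */
            up_write(&brw->rw_sem);
            /*
             * Insert the barrier before the next fast-path in down_read,
             *    see W_R case in the comment above update_fast_ctr().
             */
            rcu_sync_exit(&brw->rss);
    }
    EXPORT_SYMBOL_GPL(percpu_up_write);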