md: use lockdep_assert_held
lockdep_assert_held() is a better way to assert that a lock is held, and it also works on UP.

Signed-off-by: Shaohua Li <shli@fb.com>
commit efa4b77b00
parent f6eca2d43e
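Why this helps: on UP builds spinlocks compile away, so spin_is_locked() cannot be relied on there (hence the NR_CPUS != 1 guard in the old check), and mutex_is_locked() only reports that some task holds the mutex, not that the caller does. lockdep_assert_held() instead asks lockdep whether the current context holds the lock, and it expands to nothing when lockdep is disabled. Below is a minimal userspace sketch of this "the caller must already hold the lock" pattern; it is not taken from the patch, the names (tracked_mutex, tm_assert_held, reconfigure) are made up, and a pthread mutex with a hand-rolled owner field stands in for mddev->reconfig_mutex and lockdep's per-task tracking.

/*
 * Userspace sketch only: an assertion that the *current* thread holds a
 * lock, as opposed to checking that the lock is merely held by someone.
 */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

struct tracked_mutex {
	pthread_mutex_t lock;
	pthread_t owner;	/* valid only while held != 0 */
	int held;
};

static void tm_lock(struct tracked_mutex *m)
{
	pthread_mutex_lock(&m->lock);
	m->owner = pthread_self();
	m->held = 1;
}

static void tm_unlock(struct tracked_mutex *m)
{
	m->held = 0;
	pthread_mutex_unlock(&m->lock);
}

/* Analogue of lockdep_assert_held(): the caller must hold the lock. */
static void tm_assert_held(struct tracked_mutex *m)
{
	assert(m->held && pthread_equal(m->owner, pthread_self()));
}

static struct tracked_mutex cfg = { .lock = PTHREAD_MUTEX_INITIALIZER };

/*
 * Stand-in for a helper like md_set_array_sectors(): it never takes the
 * lock itself, it only asserts that its caller already did.
 */
static void reconfigure(void)
{
	tm_assert_held(&cfg);
	printf("reconfiguring under cfg lock\n");
}

int main(void)
{
	tm_lock(&cfg);
	reconfigure();		/* passes: this thread holds the lock */
	tm_unlock(&cfg);
	return 0;
}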
@@ -2335,7 +2335,7 @@ static void export_array(struct mddev *mddev)
 
 static bool set_in_sync(struct mddev *mddev)
 {
-	WARN_ON_ONCE(NR_CPUS != 1 && !spin_is_locked(&mddev->lock));
+	lockdep_assert_held(&mddev->lock);
 	if (!mddev->in_sync) {
 		mddev->sync_checkers++;
 		spin_unlock(&mddev->lock);
@@ -6749,7 +6749,7 @@ static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
 
 void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors)
 {
-	WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__);
+	lockdep_assert_held(&mddev->reconfig_mutex);
 
 	if (mddev->external_size)
 		return;
@@ -500,11 +500,6 @@ static inline void mddev_lock_nointr(struct mddev *mddev)
 	mutex_lock(&mddev->reconfig_mutex);
 }
 
-static inline int mddev_is_locked(struct mddev *mddev)
-{
-	return mutex_is_locked(&mddev->reconfig_mutex);
-}
-
 static inline int mddev_trylock(struct mddev *mddev)
 {
 	return mutex_trylock(&mddev->reconfig_mutex);
@@ -539,7 +539,7 @@ static void r5l_log_run_stripes(struct r5l_log *log)
 {
 	struct r5l_io_unit *io, *next;
 
-	assert_spin_locked(&log->io_list_lock);
+	lockdep_assert_held(&log->io_list_lock);
 
 	list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
 		/* don't change list order */
@@ -555,7 +555,7 @@ static void r5l_move_to_end_ios(struct r5l_log *log)
 {
 	struct r5l_io_unit *io, *next;
 
-	assert_spin_locked(&log->io_list_lock);
+	lockdep_assert_held(&log->io_list_lock);
 
 	list_for_each_entry_safe(io, next, &log->running_ios, log_sibling) {
 		/* don't change list order */
@@ -1200,7 +1200,7 @@ static void r5l_run_no_mem_stripe(struct r5l_log *log)
 {
 	struct stripe_head *sh;
 
-	assert_spin_locked(&log->io_list_lock);
+	lockdep_assert_held(&log->io_list_lock);
 
 	if (!list_empty(&log->no_mem_stripes)) {
 		sh = list_first_entry(&log->no_mem_stripes,
@@ -1216,7 +1216,7 @@ static bool r5l_complete_finished_ios(struct r5l_log *log)
 	struct r5l_io_unit *io, *next;
 	bool found = false;
 
-	assert_spin_locked(&log->io_list_lock);
+	lockdep_assert_held(&log->io_list_lock);
 
 	list_for_each_entry_safe(io, next, &log->finished_ios, log_sibling) {
 		/* don't change list order */
@@ -1388,7 +1388,7 @@ static void r5c_flush_stripe(struct r5conf *conf, struct stripe_head *sh)
 	 * raid5_release_stripe() while holding conf->device_lock
 	 */
 	BUG_ON(test_bit(STRIPE_ON_RELEASE_LIST, &sh->state));
-	assert_spin_locked(&conf->device_lock);
+	lockdep_assert_held(&conf->device_lock);
 
 	list_del_init(&sh->lru);
 	atomic_inc(&sh->count);
@@ -1415,7 +1415,7 @@ void r5c_flush_cache(struct r5conf *conf, int num)
 	int count;
 	struct stripe_head *sh, *next;
 
-	assert_spin_locked(&conf->device_lock);
+	lockdep_assert_held(&conf->device_lock);
 	if (!conf->log)
 		return;
 