[PATCH] md: provide proper rcu_dereference / rcu_assign_pointer annotations in md
Acked-by: <paulmck@us.ibm.com>
Signed-off-by: Suzanne Wood <suzannew@cs.pdx.edu>
Signed-off-by: Neil Brown <neilb@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
commit d6065f7bf8
parent 9d88883e68
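
For context, a minimal sketch of the two annotations this patch adds throughout drivers/md: rcu_dereference() on the read side and rcu_assign_pointer() when publishing a new device. The struct my_dev / devs[] names below are hypothetical stand-ins for md's mdk_rdev_t and per-slot rdev pointers, not code from the patch.

/* Illustrative sketch only -- not part of the patch. */
#include <linux/rcupdate.h>
#include <linux/atomic.h>

struct my_dev {
        int in_sync;
        atomic_t nr_pending;
};

static struct my_dev *devs[8];          /* slots replaced by an updater */

static int pick_dev(void)
{
        int i;

        rcu_read_lock();
        for (i = 0; i < 8; i++) {
                /* rcu_dereference() marks the RCU-protected load and adds
                 * whatever read ordering the architecture requires. */
                struct my_dev *d = rcu_dereference(devs[i]);

                if (d && d->in_sync) {
                        /* pin the device before leaving the read section */
                        atomic_inc(&d->nr_pending);
                        rcu_read_unlock();
                        return i;
                }
        }
        rcu_read_unlock();
        return -1;
}

static void publish_dev(int slot, struct my_dev *d)
{
        /* rcu_assign_pointer() orders the initialization of *d before the
         * pointer becomes visible to concurrent readers. */
        rcu_assign_pointer(devs[slot], d);
}

On later kernels the devs[] array would also carry the __rcu annotation so sparse can check these accesses; at the time of this patch the accessor macros alone were the convention.
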
drivers/md/multipath.c:

@@ -63,7 +63,7 @@ static int multipath_map (multipath_conf_t *conf)
 	rcu_read_lock();
 	for (i = 0; i < disks; i++) {
-		mdk_rdev_t *rdev = conf->multipaths[i].rdev;
+		mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
 		if (rdev && rdev->in_sync) {
 			atomic_inc(&rdev->nr_pending);
 			rcu_read_unlock();
@@ -139,7 +139,7 @@ static void unplug_slaves(mddev_t *mddev)
 	rcu_read_lock();
 	for (i=0; i<mddev->raid_disks; i++) {
-		mdk_rdev_t *rdev = conf->multipaths[i].rdev;
+		mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
 		if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) {
 			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
@@ -224,7 +224,7 @@ static int multipath_issue_flush(request_queue_t *q, struct gendisk *disk,
 	rcu_read_lock();
 	for (i=0; i<mddev->raid_disks && ret == 0; i++) {
-		mdk_rdev_t *rdev = conf->multipaths[i].rdev;
+		mdk_rdev_t *rdev = rcu_dereference(conf->multipaths[i].rdev);
 		if (rdev && !rdev->faulty) {
 			struct block_device *bdev = rdev->bdev;
 			request_queue_t *r_queue = bdev_get_queue(bdev);
@@ -331,7 +331,7 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 			conf->working_disks++;
 			rdev->raid_disk = path;
 			rdev->in_sync = 1;
-			p->rdev = rdev;
+			rcu_assign_pointer(p->rdev, rdev);
 			found = 1;
 		}
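
The multipath_add_disk() hunk above publishes the new rdev with rcu_assign_pointer(). The matching removal path is not part of this diff; purely as a hedged sketch, continuing the hypothetical my_dev example, an updater (assumed to hold whatever lock serializes updaters) would unpublish the slot, wait for pre-existing readers, and only then look at the reference count:

/* Illustrative sketch only -- the removal side is not in this patch. */
static struct my_dev *retire_dev(int slot)
{
        struct my_dev *d = devs[slot];

        if (!d)
                return NULL;
        rcu_assign_pointer(devs[slot], NULL);
        synchronize_rcu();                      /* wait out current readers */
        if (atomic_read(&d->nr_pending)) {
                /* a reader pinned the device; put it back, retry later */
                rcu_assign_pointer(devs[slot], d);
                return NULL;
        }
        return d;                               /* caller may now free it */
}
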
drivers/md/raid1.c:

@@ -416,10 +416,10 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
 		/* Choose the first operation device, for consistancy */
 		new_disk = 0;

-		for (rdev = conf->mirrors[new_disk].rdev;
+		for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
 		     !rdev || !rdev->in_sync
 			     || test_bit(WriteMostly, &rdev->flags);
-		     rdev = conf->mirrors[++new_disk].rdev) {
+		     rdev = rcu_dereference(conf->mirrors[++new_disk].rdev)) {

 			if (rdev && rdev->in_sync)
 				wonly_disk = new_disk;
@@ -434,10 +434,10 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)


 	/* make sure the disk is operational */
-	for (rdev = conf->mirrors[new_disk].rdev;
+	for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
 	     !rdev || !rdev->in_sync ||
 		     test_bit(WriteMostly, &rdev->flags);
-	     rdev = conf->mirrors[new_disk].rdev) {
+	     rdev = rcu_dereference(conf->mirrors[new_disk].rdev)) {

 		if (rdev && rdev->in_sync)
 			wonly_disk = new_disk;
@@ -474,7 +474,7 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
 		disk = conf->raid_disks;
 		disk--;

-		rdev = conf->mirrors[disk].rdev;
+		rdev = rcu_dereference(conf->mirrors[disk].rdev);

 		if (!rdev ||
 		    !rdev->in_sync ||
@@ -496,7 +496,7 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)

 	if (new_disk >= 0) {
-		rdev = conf->mirrors[new_disk].rdev;
+		rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
 		if (!rdev)
 			goto retry;
 		atomic_inc(&rdev->nr_pending);
@@ -522,7 +522,7 @@ static void unplug_slaves(mddev_t *mddev)
 	rcu_read_lock();
 	for (i=0; i<mddev->raid_disks; i++) {
-		mdk_rdev_t *rdev = conf->mirrors[i].rdev;
+		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
 		if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) {
 			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
@@ -556,7 +556,7 @@ static int raid1_issue_flush(request_queue_t *q, struct gendisk *disk,
 	rcu_read_lock();
 	for (i=0; i<mddev->raid_disks && ret == 0; i++) {
-		mdk_rdev_t *rdev = conf->mirrors[i].rdev;
+		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
 		if (rdev && !rdev->faulty) {
 			struct block_device *bdev = rdev->bdev;
 			request_queue_t *r_queue = bdev_get_queue(bdev);
@@ -728,7 +728,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
 #endif
 	rcu_read_lock();
 	for (i = 0; i < disks; i++) {
-		if ((rdev=conf->mirrors[i].rdev) != NULL &&
+		if ((rdev=rcu_dereference(conf->mirrors[i].rdev)) != NULL &&
 		    !rdev->faulty) {
 			atomic_inc(&rdev->nr_pending);
 			if (rdev->faulty) {
@@ -954,7 +954,7 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 			found = 1;
 			if (rdev->saved_raid_disk != mirror)
 				conf->fullsync = 1;
-			p->rdev = rdev;
+			rcu_assign_pointer(p->rdev, rdev);
 			break;
 		}
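
The raid1 make_request() hunk keeps md's existing discipline of taking the nr_pending reference inside the RCU read-side critical section and then re-testing the failure flag, so a device that fails concurrently is dropped again rather than being issued I/O. Continuing the hypothetical my_dev sketch (in_sync stands in for md's faulty test):

/* Illustrative sketch only. */
static struct my_dev *get_dev(int slot)
{
        struct my_dev *d;

        rcu_read_lock();
        d = rcu_dereference(devs[slot]);
        if (d) {
                atomic_inc(&d->nr_pending);
                if (!d->in_sync) {              /* re-check after pinning */
                        atomic_dec(&d->nr_pending);
                        d = NULL;
                }
        }
        rcu_read_unlock();
        return d;                               /* pinned, or NULL */
}
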
drivers/md/raid10.c:

@@ -496,6 +496,7 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio)
 	int disk, slot, nslot;
 	const int sectors = r10_bio->sectors;
 	sector_t new_distance, current_distance;
+	mdk_rdev_t *rdev;

 	raid10_find_phys(conf, r10_bio);
 	rcu_read_lock();
@@ -510,8 +511,8 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio)
 		slot = 0;
 		disk = r10_bio->devs[slot].devnum;

-		while (!conf->mirrors[disk].rdev ||
-		       !conf->mirrors[disk].rdev->in_sync) {
+		while ((rdev = rcu_dereference(conf->mirrors[disk].rdev)) == NULL ||
+		       !rdev->in_sync) {
 			slot++;
 			if (slot == conf->copies) {
 				slot = 0;
@@ -527,8 +528,8 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio)
 	/* make sure the disk is operational */
 	slot = 0;
 	disk = r10_bio->devs[slot].devnum;
-	while (!conf->mirrors[disk].rdev ||
-	       !conf->mirrors[disk].rdev->in_sync) {
+	while ((rdev=rcu_dereference(conf->mirrors[disk].rdev)) == NULL ||
+	       !rdev->in_sync) {
 		slot ++;
 		if (slot == conf->copies) {
 			disk = -1;
@@ -547,11 +548,11 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio)
 		int ndisk = r10_bio->devs[nslot].devnum;


-		if (!conf->mirrors[ndisk].rdev ||
-		    !conf->mirrors[ndisk].rdev->in_sync)
+		if ((rdev=rcu_dereference(conf->mirrors[ndisk].rdev)) == NULL ||
+		    !rdev->in_sync)
 			continue;

-		if (!atomic_read(&conf->mirrors[ndisk].rdev->nr_pending)) {
+		if (!atomic_read(&rdev->nr_pending)) {
 			disk = ndisk;
 			slot = nslot;
 			break;
@@ -569,7 +570,7 @@ static int read_balance(conf_t *conf, r10bio_t *r10_bio)
 	r10_bio->read_slot = slot;
 /*	conf->next_seq_sect = this_sector + sectors;*/

-	if (disk >= 0 && conf->mirrors[disk].rdev)
+	if (disk >= 0 && (rdev=rcu_dereference(conf->mirrors[disk].rdev))!= NULL)
 		atomic_inc(&conf->mirrors[disk].rdev->nr_pending);
 	rcu_read_unlock();
@@ -583,7 +584,7 @@ static void unplug_slaves(mddev_t *mddev)
 	rcu_read_lock();
 	for (i=0; i<mddev->raid_disks; i++) {
-		mdk_rdev_t *rdev = conf->mirrors[i].rdev;
+		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
 		if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) {
 			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
@@ -614,7 +615,7 @@ static int raid10_issue_flush(request_queue_t *q, struct gendisk *disk,
 	rcu_read_lock();
 	for (i=0; i<mddev->raid_disks && ret == 0; i++) {
-		mdk_rdev_t *rdev = conf->mirrors[i].rdev;
+		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
 		if (rdev && !rdev->faulty) {
 			struct block_device *bdev = rdev->bdev;
 			request_queue_t *r_queue = bdev_get_queue(bdev);
@@ -768,9 +769,10 @@ static int make_request(request_queue_t *q, struct bio * bio)
 	rcu_read_lock();
 	for (i = 0; i < conf->copies; i++) {
 		int d = r10_bio->devs[i].devnum;
-		if (conf->mirrors[d].rdev &&
-		    !conf->mirrors[d].rdev->faulty) {
-			atomic_inc(&conf->mirrors[d].rdev->nr_pending);
+		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[d].rdev);
+		if (rdev &&
+		    !rdev->faulty) {
+			atomic_inc(&rdev->nr_pending);
 			r10_bio->devs[i].bio = bio;
 		} else
 			r10_bio->devs[i].bio = NULL;
@@ -980,7 +982,7 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 			p->head_position = 0;
 			rdev->raid_disk = mirror;
 			found = 1;
-			p->rdev = rdev;
+			rcu_assign_pointer(p->rdev, rdev);
 			break;
 		}
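
The raid10 read_balance() hunks also introduce a local rdev so each slot is dereferenced once per decision; under RCU, re-reading conf->mirrors[...].rdev for the test and again for the use could observe two different pointers (or NULL). A minimal sketch of the same idea with the hypothetical my_dev types:

/* Illustrative sketch only.  Caller holds rcu_read_lock(). */
static int dev_is_idle(int slot)
{
        struct my_dev *d = rcu_dereference(devs[slot]); /* one snapshot */

        if (!d || !d->in_sync)
                return 0;
        /* the test and the use see the same pointer value */
        return atomic_read(&d->nr_pending) == 0;
}
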
drivers/md/raid5.c:

@@ -1374,7 +1374,7 @@ static void handle_stripe(struct stripe_head *sh)
 			bi->bi_end_io = raid5_end_read_request;

 		rcu_read_lock();
-		rdev = conf->disks[i].rdev;
+		rdev = rcu_dereference(conf->disks[i].rdev);
 		if (rdev && rdev->faulty)
 			rdev = NULL;
 		if (rdev)
@@ -1448,7 +1448,7 @@ static void unplug_slaves(mddev_t *mddev)
 	rcu_read_lock();
 	for (i=0; i<mddev->raid_disks; i++) {
-		mdk_rdev_t *rdev = conf->disks[i].rdev;
+		mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
 		if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) {
 			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
@@ -1493,7 +1493,7 @@ static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk,
 	rcu_read_lock();
 	for (i=0; i<mddev->raid_disks && ret == 0; i++) {
-		mdk_rdev_t *rdev = conf->disks[i].rdev;
+		mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
 		if (rdev && !rdev->faulty) {
 			struct block_device *bdev = rdev->bdev;
 			request_queue_t *r_queue = bdev_get_queue(bdev);
@@ -2165,7 +2165,7 @@ static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 			found = 1;
 			if (rdev->saved_raid_disk != disk)
 				conf->fullsync = 1;
-			p->rdev = rdev;
+			rcu_assign_pointer(p->rdev, rdev);
 			break;
 		}
 	print_raid5_conf(conf);
drivers/md/raid6main.c:

@@ -1464,7 +1464,7 @@ static void handle_stripe(struct stripe_head *sh)
 			bi->bi_end_io = raid6_end_read_request;

 		rcu_read_lock();
-		rdev = conf->disks[i].rdev;
+		rdev = rcu_dereference(conf->disks[i].rdev);
 		if (rdev && rdev->faulty)
 			rdev = NULL;
 		if (rdev)
@@ -1538,7 +1538,7 @@ static void unplug_slaves(mddev_t *mddev)
 	rcu_read_lock();
 	for (i=0; i<mddev->raid_disks; i++) {
-		mdk_rdev_t *rdev = conf->disks[i].rdev;
+		mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
 		if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) {
 			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
@@ -1583,7 +1583,7 @@ static int raid6_issue_flush(request_queue_t *q, struct gendisk *disk,
 	rcu_read_lock();
 	for (i=0; i<mddev->raid_disks && ret == 0; i++) {
-		mdk_rdev_t *rdev = conf->disks[i].rdev;
+		mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev);
 		if (rdev && !rdev->faulty) {
 			struct block_device *bdev = rdev->bdev;
 			request_queue_t *r_queue = bdev_get_queue(bdev);
@@ -2154,7 +2154,7 @@ static int raid6_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 			found = 1;
 			if (rdev->saved_raid_disk != disk)
 				conf->fullsync = 1;
-			p->rdev = rdev;
+			rcu_assign_pointer(p->rdev, rdev);
 			break;
 		}
 	print_raid6_conf(conf);