btrfs: use bio_for_each_segment_all in __btrfsic_submit_bio

And remove the bogus check for a NULL return value from kmap, which
can't happen.  While we're at it: I don't think that kmapping up to 256
will work without deadlocks on highmem machines, a better idea would
be to use vm_map_ram to map all of them into a single virtual address
range.  Incidentally that would also simplify the code a lot.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: David Sterba <dsterba@suse.com>
This commit is contained in:
Christoph Hellwig 2016-11-25 09:07:53 +01:00 committed by David Sterba
parent 4989d277eb
commit 1621f8f3f9

View File

@@ -2819,10 +2819,11 @@ static void __btrfsic_submit_bio(struct bio *bio)
 	 * btrfsic_mount(), this might return NULL */
 	dev_state = btrfsic_dev_state_lookup(bio->bi_bdev);
 	if (NULL != dev_state &&
-	    (bio_op(bio) == REQ_OP_WRITE) && NULL != bio->bi_io_vec) {
+	    (bio_op(bio) == REQ_OP_WRITE) && bio_has_data(bio)) {
 		unsigned int i;
 		u64 dev_bytenr;
 		u64 cur_bytenr;
+		struct bio_vec *bvec;
 		int bio_is_patched;
 		char **mapped_datav;
@@ -2840,32 +2841,23 @@ static void __btrfsic_submit_bio(struct bio *bio)
 		if (!mapped_datav)
 			goto leave;
 		cur_bytenr = dev_bytenr;
-		for (i = 0; i < bio->bi_vcnt; i++) {
-			BUG_ON(bio->bi_io_vec[i].bv_len != PAGE_SIZE);
-			mapped_datav[i] = kmap(bio->bi_io_vec[i].bv_page);
-			if (!mapped_datav[i]) {
-				while (i > 0) {
-					i--;
-					kunmap(bio->bi_io_vec[i].bv_page);
-				}
-				kfree(mapped_datav);
-				goto leave;
-			}
+		bio_for_each_segment_all(bvec, bio, i) {
+			BUG_ON(bvec->bv_len != PAGE_SIZE);
+			mapped_datav[i] = kmap(bvec->bv_page);
 			if (dev_state->state->print_mask &
 			    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH_VERBOSE)
-				pr_info("#%u: bytenr=%llu, len=%u, offset=%u\n",
-				       i, cur_bytenr, bio->bi_io_vec[i].bv_len,
-				       bio->bi_io_vec[i].bv_offset);
-			cur_bytenr += bio->bi_io_vec[i].bv_len;
+				pr_info("#%u: bytenr=%llu, len=%u, offset=%u\n",
+				       i, cur_bytenr, bvec->bv_len, bvec->bv_offset);
+			cur_bytenr += bvec->bv_len;
 		}
 		btrfsic_process_written_block(dev_state, dev_bytenr,
 					      mapped_datav, bio->bi_vcnt,
 					      bio, &bio_is_patched,
 					      NULL, bio->bi_opf);
-		while (i > 0) {
-			i--;
-			kunmap(bio->bi_io_vec[i].bv_page);
-		}
+		bio_for_each_segment_all(bvec, bio, i)
+			kunmap(bvec->bv_page);
 		kfree(mapped_datav);
 	} else if (NULL != dev_state && (bio->bi_opf & REQ_PREFLUSH)) {
 		if (dev_state->state->print_mask &