md/raid1: convert to use bio_submit_split_bioset()
Unify bio split code, and prepare to fix the ordering of split IO. Note that bio_submit_split_bioset() can fail the original bio directly on a split error; set R1BIO_Returned in this case to notify raid_end_bio_io() that the original bio has already been returned.

Signed-off-by: Yu Kuai <yukuai3@huawei.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
pull/1354/merge
parent
5b38ee5a4a
commit
a6fcc160d6
|
|
@ -1317,7 +1317,7 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
|
|||
struct raid1_info *mirror;
|
||||
struct bio *read_bio;
|
||||
int max_sectors;
|
||||
int rdisk, error;
|
||||
int rdisk;
|
||||
bool r1bio_existed = !!r1_bio;
|
||||
|
||||
/*
|
||||
|
|
@ -1377,18 +1377,13 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
|
|||
}
|
||||
|
||||
if (max_sectors < bio_sectors(bio)) {
|
||||
struct bio *split = bio_split(bio, max_sectors,
|
||||
gfp, &conf->bio_split);
|
||||
|
||||
if (IS_ERR(split)) {
|
||||
error = PTR_ERR(split);
|
||||
bio = bio_submit_split_bioset(bio, max_sectors,
|
||||
&conf->bio_split);
|
||||
if (!bio) {
|
||||
set_bit(R1BIO_Returned, &r1_bio->state);
|
||||
goto err_handle;
|
||||
}
|
||||
|
||||
bio_chain(split, bio);
|
||||
trace_block_split(split, bio->bi_iter.bi_sector);
|
||||
submit_bio_noacct(bio);
|
||||
bio = split;
|
||||
r1_bio->master_bio = bio;
|
||||
r1_bio->sectors = max_sectors;
|
||||
}
|
||||
|
|
@ -1416,8 +1411,6 @@ static void raid1_read_request(struct mddev *mddev, struct bio *bio,
|
|||
|
||||
err_handle:
|
||||
atomic_dec(&mirror->rdev->nr_pending);
|
||||
bio->bi_status = errno_to_blk_status(error);
|
||||
set_bit(R1BIO_Uptodate, &r1_bio->state);
|
||||
raid_end_bio_io(r1_bio);
|
||||
}
|
||||
|
||||
|
|
@ -1484,7 +1477,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
|
|||
{
|
||||
struct r1conf *conf = mddev->private;
|
||||
struct r1bio *r1_bio;
|
||||
int i, disks, k, error;
|
||||
int i, disks, k;
|
||||
unsigned long flags;
|
||||
int first_clone;
|
||||
int max_sectors;
|
||||
|
|
@ -1588,10 +1581,8 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
|
|||
* complexity of supporting that is not worth
|
||||
* the benefit.
|
||||
*/
|
||||
if (bio->bi_opf & REQ_ATOMIC) {
|
||||
error = -EIO;
|
||||
if (bio->bi_opf & REQ_ATOMIC)
|
||||
goto err_handle;
|
||||
}
|
||||
|
||||
good_sectors = first_bad - r1_bio->sector;
|
||||
if (good_sectors < max_sectors)
|
||||
|
|
@ -1611,18 +1602,13 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
|
|||
max_sectors = min_t(int, max_sectors,
|
||||
BIO_MAX_VECS * (PAGE_SIZE >> 9));
|
||||
if (max_sectors < bio_sectors(bio)) {
|
||||
struct bio *split = bio_split(bio, max_sectors,
|
||||
GFP_NOIO, &conf->bio_split);
|
||||
|
||||
if (IS_ERR(split)) {
|
||||
error = PTR_ERR(split);
|
||||
bio = bio_submit_split_bioset(bio, max_sectors,
|
||||
&conf->bio_split);
|
||||
if (!bio) {
|
||||
set_bit(R1BIO_Returned, &r1_bio->state);
|
||||
goto err_handle;
|
||||
}
|
||||
|
||||
bio_chain(split, bio);
|
||||
trace_block_split(split, bio->bi_iter.bi_sector);
|
||||
submit_bio_noacct(bio);
|
||||
bio = split;
|
||||
r1_bio->master_bio = bio;
|
||||
r1_bio->sectors = max_sectors;
|
||||
}
|
||||
|
|
@ -1698,8 +1684,6 @@ err_handle:
|
|||
}
|
||||
}
|
||||
|
||||
bio->bi_status = errno_to_blk_status(error);
|
||||
set_bit(R1BIO_Uptodate, &r1_bio->state);
|
||||
raid_end_bio_io(r1_bio);
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -178,7 +178,9 @@ enum r1bio_state {
|
|||
* any write was successful. Otherwise we call when
|
||||
* any write-behind write succeeds, otherwise we call
|
||||
* with failure when last write completes (and all failed).
|
||||
* Record that bi_end_io was called with this flag...
|
||||
*
|
||||
* And for bio_split errors, record that bi_end_io was called
|
||||
* with this flag...
|
||||
*/
|
||||
R1BIO_Returned,
|
||||
/* If a write for this request means we can clear some
|
||||
|
|
|
|||
Loading…
Reference in New Issue