ublk: use rq_for_each_segment() for user copy
ublk_advance_io_iter() and ublk_copy_io_pages() currently open-code the
iteration over the request's bvecs. Switch to the rq_for_each_segment()
macro provided by blk-mq to avoid reaching into the bio internals and
simplify the code.

Suggested-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Caleb Sander Mateos <csander@purestorage.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 2299ceec36
commit e87d66ab27
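
For readers who have not used the macro, here is a minimal sketch
(illustrative only, not part of this commit) of how rq_for_each_segment()
visits every bvec segment of a request across all of its chained bios.
my_walk_request() and my_handle_segment() are hypothetical names:

#include <linux/blk-mq.h>	/* struct req_iterator, rq_for_each_segment() */
#include <linux/highmem.h>	/* bvec_kmap_local(), kunmap_local() */

static void my_handle_segment(void *buf, unsigned int len);	/* hypothetical */

/*
 * Walk each bio_vec segment of 'req' in order. The req_iterator tracks
 * the current bio and its bvec_iter internally, so the caller never has
 * to follow bio->bi_next or call bio_advance_iter() by hand.
 */
static void my_walk_request(struct request *req)
{
	struct req_iterator iter;
	struct bio_vec bv;

	rq_for_each_segment(bv, req, iter) {
		/* map the segment's page (plus bv_offset) for CPU access */
		void *buf = bvec_kmap_local(&bv);

		my_handle_segment(buf, bv.bv_len);
		kunmap_local(buf);
	}
}

This is exactly the shape the new ublk_copy_user_pages() takes in the
diff below, in place of the hand-rolled bio chaining that is removed.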
@@ -913,22 +913,29 @@ static const struct block_device_operations ub_fops = {
 	.report_zones = ublk_report_zones,
 };
 
-struct ublk_io_iter {
-	struct bio *bio;
-	struct bvec_iter iter;
-};
-
-/* return how many bytes are copied */
-static size_t ublk_copy_io_pages(struct ublk_io_iter *data,
-		struct iov_iter *uiter, int dir)
+/*
+ * Copy data between request pages and io_iter, and 'offset'
+ * is the start point of linear offset of request.
+ */
+static size_t ublk_copy_user_pages(const struct request *req,
+		unsigned offset, struct iov_iter *uiter, int dir)
 {
+	struct req_iterator iter;
+	struct bio_vec bv;
 	size_t done = 0;
 
-	for (;;) {
-		struct bio_vec bv = bio_iter_iovec(data->bio, data->iter);
-		void *bv_buf = bvec_kmap_local(&bv);
+	rq_for_each_segment(bv, req, iter) {
+		void *bv_buf;
 		size_t copied;
 
+		if (offset >= bv.bv_len) {
+			offset -= bv.bv_len;
+			continue;
+		}
+
+		bv.bv_offset += offset;
+		bv.bv_len -= offset;
+		bv_buf = bvec_kmap_local(&bv);
 		if (dir == ITER_DEST)
 			copied = copy_to_iter(bv_buf, bv.bv_len, uiter);
 		else
@@ -940,50 +947,11 @@ static size_t ublk_copy_io_pages(struct ublk_io_iter *data,
 		if (copied < bv.bv_len)
 			break;
 
-		/* advance bio */
-		bio_advance_iter_single(data->bio, &data->iter, copied);
-		if (!data->iter.bi_size) {
-			data->bio = data->bio->bi_next;
-			if (data->bio == NULL)
-				break;
-			data->iter = data->bio->bi_iter;
-		}
+		offset = 0;
 	}
 	return done;
 }
 
-static bool ublk_advance_io_iter(const struct request *req,
-		struct ublk_io_iter *iter, unsigned int offset)
-{
-	struct bio *bio = req->bio;
-
-	for_each_bio(bio) {
-		if (bio->bi_iter.bi_size > offset) {
-			iter->bio = bio;
-			iter->iter = bio->bi_iter;
-			bio_advance_iter(iter->bio, &iter->iter, offset);
-			return true;
-		}
-		offset -= bio->bi_iter.bi_size;
-	}
-	return false;
-}
-
-/*
- * Copy data between request pages and io_iter, and 'offset'
- * is the start point of linear offset of request.
- */
-static size_t ublk_copy_user_pages(const struct request *req,
-		unsigned offset, struct iov_iter *uiter, int dir)
-{
-	struct ublk_io_iter iter;
-
-	if (!ublk_advance_io_iter(req, &iter, offset))
-		return 0;
-
-	return ublk_copy_io_pages(&iter, uiter, dir);
-}
-
 static inline bool ublk_need_map_req(const struct request *req)
 {
 	return ublk_rq_has_data(req) && req_op(req) == REQ_OP_WRITE;
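
A note on the offset handling in the new loop (an observation on the
diff above, not part of the commit text): whole segments are skipped
while 'offset' still covers them, the first partially covered segment
is trimmed by bumping bv.bv_offset and shrinking bv.bv_len, and 'offset'
is cleared at the bottom of the loop body so every later segment is
copied in full (clearing it right after the trim would be equivalent,
since a short copy breaks out of the loop anyway). The same idiom in a
self-contained userspace sketch, with hypothetical names; the 'len'
bound stands in for the capacity of the iov_iter:

#include <stddef.h>
#include <string.h>

struct seg {
	const char *buf;
	size_t len;
};

/*
 * Copy up to 'len' bytes out of the concatenation of 'nsegs' segments,
 * starting 'offset' bytes into it. Returns the number of bytes copied.
 */
static size_t copy_from_segs(const struct seg *segs, size_t nsegs,
			     size_t offset, char *dst, size_t len)
{
	size_t done = 0;

	for (size_t i = 0; i < nsegs && done < len; i++) {
		const char *src = segs[i].buf;
		size_t n = segs[i].len;

		if (offset >= n) {	/* 'offset' skips this whole segment */
			offset -= n;
			continue;
		}
		src += offset;		/* trim the first partial segment */
		n -= offset;
		offset = 0;		/* later segments start at byte 0 */

		if (n > len - done)
			n = len - done;
		memcpy(dst + done, src, n);
		done += n;
	}
	return done;
}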