blk-mq-dma: provide the bio_vec array being iterated

This will make it easier to add different sources for the bvec array, such
as the upcoming integrity support (see the sketch below), rather than
assuming the bio's bi_io_vec is always used. It also brings iterating
"special" payloads more in line with iterating normal payloads.

Signed-off-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Kanchan Joshi <joshi.k@samsung.com>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Link: https://lore.kernel.org/r/20250813153153.3260897-3-kbusch@meta.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Keith Busch 2025-08-13 08:31:46 -07:00 committed by Jens Axboe
parent 7a6fc1634c
commit dae75dead2
2 changed files with 34 additions and 23 deletions
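
For context, the new ->bvecs field lets the iterator be seeded from any
bio_vec array, not just bio->bi_io_vec. Below is a minimal sketch of how a
later integrity variant might seed the same iterator; it assumes the existing
bio_integrity() accessor and the bip_vec/bip_iter fields of struct
bio_integrity_payload, and the helper itself is hypothetical, not part of
this commit:

	/*
	 * Hypothetical helper: seed blk_map_iter from the integrity payload
	 * rather than bi_io_vec. Illustrative only.
	 */
	static inline void blk_rq_map_integrity_iter_init(struct request *rq,
							  struct blk_map_iter *iter)
	{
		struct bio_integrity_payload *bip = bio_integrity(rq->bio);

		*iter = (struct blk_map_iter) {
			.bio	= rq->bio,
			.bvecs	= bip->bip_vec,
			.iter	= bip->bip_iter,
		};
	}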

block/blk-mq-dma.c

@@ -16,23 +16,14 @@ static bool blk_map_iter_next(struct request *req, struct blk_map_iter *iter,
 	unsigned int max_size;
 	struct bio_vec bv;
 
-	if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
-		if (!iter->bio)
-			return false;
-		vec->paddr = bvec_phys(&req->special_vec);
-		vec->len = req->special_vec.bv_len;
-		iter->bio = NULL;
-		return true;
-	}
-
 	if (!iter->iter.bi_size)
 		return false;
 
-	bv = mp_bvec_iter_bvec(iter->bio->bi_io_vec, iter->iter);
+	bv = mp_bvec_iter_bvec(iter->bvecs, iter->iter);
 	vec->paddr = bvec_phys(&bv);
 	max_size = get_max_segment_size(&req->q->limits, vec->paddr, UINT_MAX);
 	bv.bv_len = min(bv.bv_len, max_size);
-	bio_advance_iter_single(iter->bio, &iter->iter, bv.bv_len);
+	bvec_iter_advance_single(iter->bvecs, &iter->iter, bv.bv_len);
 
 	/*
 	 * If we are entirely done with this bi_io_vec entry, check if the next
@@ -43,19 +34,20 @@ static bool blk_map_iter_next(struct request *req, struct blk_map_iter *iter,
 		struct bio_vec next;
 
 		if (!iter->iter.bi_size) {
-			if (!iter->bio->bi_next)
+			if (!iter->bio || !iter->bio->bi_next)
 				break;
 			iter->bio = iter->bio->bi_next;
 			iter->iter = iter->bio->bi_iter;
+			iter->bvecs = iter->bio->bi_io_vec;
 		}
 
-		next = mp_bvec_iter_bvec(iter->bio->bi_io_vec, iter->iter);
+		next = mp_bvec_iter_bvec(iter->bvecs, iter->iter);
 		if (bv.bv_len + next.bv_len > max_size ||
 		    !biovec_phys_mergeable(req->q, &bv, &next))
 			break;
 
 		bv.bv_len += next.bv_len;
-		bio_advance_iter_single(iter->bio, &iter->iter, next.bv_len);
+		bvec_iter_advance_single(iter->bvecs, &iter->iter, next.bv_len);
 	}
 
 	vec->len = bv.bv_len;
@@ -125,6 +117,30 @@ static bool blk_rq_dma_map_iova(struct request *req, struct device *dma_dev,
 	return true;
 }
 
+static inline void blk_rq_map_iter_init(struct request *rq,
+					struct blk_map_iter *iter)
+{
+	struct bio *bio = rq->bio;
+
+	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) {
+		*iter = (struct blk_map_iter) {
+			.bvecs = &rq->special_vec,
+			.iter = {
+				.bi_size = rq->special_vec.bv_len,
+			}
+		};
+	} else if (bio) {
+		*iter = (struct blk_map_iter) {
+			.bio = bio,
+			.bvecs = bio->bi_io_vec,
+			.iter = bio->bi_iter,
+		};
+	} else {
+		/* the internal flush request may not have bio attached */
+		*iter = (struct blk_map_iter) {};
+	}
+}
+
 /**
  * blk_rq_dma_map_iter_start - map the first DMA segment for a request
  * @req:	request to map
@@ -153,8 +169,7 @@ bool blk_rq_dma_map_iter_start(struct request *req, struct device *dma_dev,
 	unsigned int total_len = blk_rq_payload_bytes(req);
 	struct phys_vec vec;
 
-	iter->iter.bio = req->bio;
-	iter->iter.iter = req->bio->bi_iter;
+	blk_rq_map_iter_init(req, &iter->iter);
 	memset(&iter->p2pdma, 0, sizeof(iter->p2pdma));
 	iter->status = BLK_STS_OK;
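
For reference, nothing changes for consumers of the DMA iterator. A sketch of
a driver-side mapping loop, modeled on how nvme-pci uses
blk_rq_dma_map_iter_start()/blk_rq_dma_map_iter_next(); here dma_dev, state,
and setup_one_segment() stand in for driver-specific pieces:

	struct blk_dma_iter iter;

	if (!blk_rq_dma_map_iter_start(req, dma_dev, &state, &iter))
		return iter.status;	/* mapping error, or empty payload */
	do {
		/* each pass yields one DMA segment in iter.addr/iter.len */
		setup_one_segment(iter.addr, iter.len);
	} while (blk_rq_dma_map_iter_next(req, dma_dev, &state, &iter));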
@@ -246,16 +261,11 @@ blk_next_sg(struct scatterlist **sg, struct scatterlist *sglist)
 int __blk_rq_map_sg(struct request *rq, struct scatterlist *sglist,
 		    struct scatterlist **last_sg)
 {
-	struct blk_map_iter iter = {
-		.bio	= rq->bio,
-	};
+	struct blk_map_iter iter;
 	struct phys_vec vec;
 	int nsegs = 0;
 
-	/* the internal flush request may not have bio attached */
-	if (iter.bio)
-		iter.iter = iter.bio->bi_iter;
-
+	blk_rq_map_iter_init(rq, &iter);
 	while (blk_map_iter_next(rq, &iter, &vec)) {
 		*last_sg = blk_next_sg(last_sg, sglist);
 		sg_set_page(*last_sg, phys_to_page(vec.paddr), vec.len,
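
The scatterlist path is likewise unchanged for callers, who pass a cursor for
the last mapped entry. A minimal sketch using only the signature visible in
this hunk (the sg_init_table() sizing is the usual pattern, not part of this
commit):

	struct scatterlist *last_sg = NULL;
	int nsegs;

	sg_init_table(sglist, blk_rq_nr_phys_segments(rq));
	nsegs = __blk_rq_map_sg(rq, sglist, &last_sg);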

include/linux/blk-mq-dma.h

@@ -8,6 +8,7 @@
 struct blk_map_iter {
 	struct bvec_iter	iter;
 	struct bio		*bio;
+	struct bio_vec		*bvecs;
 };
 
 struct blk_dma_iter {