block: use bio_alloc_bioset for passthru IO by default

Use bio_alloc_bioset() for passthru IO by default, so that the bio cache
can be enabled for IRQ-driven and polled passthru IO in a later patch.

Signed-off-by: Fengnan Chang <changfengnan@bytedance.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Fengnan Chang 2025-11-14 17:21:48 +08:00 committed by Jens Axboe
parent 559e608c46
commit 05ce4c584c
2 changed files with 37 additions and 55 deletions
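In outline: every mapping bio in blk-map.c now comes from fs_bio_set via a common helper pair, instead of choosing between the bio cache and bio_kmalloc() per call site. A condensed sketch of the pattern the diff applies at each converted call site (the function name here is illustrative and error paths are trimmed):

static int map_pattern_example(struct request *rq, unsigned int nr_vecs,
		gfp_t gfp_mask)
{
	struct bio *bio;

	/* Always bioset-backed; inherits rq->cmd_flags, so REQ_ALLOC_CACHE
	 * can later reach passthru mapping bios too. */
	bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask);
	if (!bio)
		return -ENOMEM;

	/* ... map or copy the user/kernel buffer into the bio ... */

	/* One teardown for every path, replacing bio_uninit() + kfree(). */
	blk_mq_map_bio_put(bio);
	return 0;
}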

block/blk-map.c

@@ -37,6 +37,25 @@ static struct bio_map_data *bio_alloc_map_data(struct iov_iter *data,
 	return bmd;
 }
 
+static inline void blk_mq_map_bio_put(struct bio *bio)
+{
+	bio_put(bio);
+}
+
+static struct bio *blk_rq_map_bio_alloc(struct request *rq,
+		unsigned int nr_vecs, gfp_t gfp_mask)
+{
+	struct block_device *bdev = rq->q->disk ? rq->q->disk->part0 : NULL;
+	struct bio *bio;
+
+	bio = bio_alloc_bioset(bdev, nr_vecs, rq->cmd_flags, gfp_mask,
+			       &fs_bio_set);
+	if (!bio)
+		return NULL;
+
+	return bio;
+}
+
 /**
  * bio_copy_from_iter - copy all pages from iov_iter to bio
  * @bio: The &struct bio which describes the I/O as destination
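Note that the new helper forwards rq->cmd_flags straight into bio_alloc_bioset(), so whatever the submitter sets on the request reaches the mapping bio. An illustrative sketch, assuming a passthru request queue q (this caller is not part of the diff; the function name is hypothetical):

static int alloc_passthru_rq_example(struct request_queue *q)
{
	struct request *rq;

	/* Illustrative: REQ_ALLOC_CACHE set here propagates into
	 * rq->cmd_flags, which blk_rq_map_bio_alloc() forwards to
	 * bio_alloc_bioset(), so the flag can reach the mapping bio once
	 * later patches enable the cache for passthru. */
	rq = blk_mq_alloc_request(q, REQ_OP_DRV_IN | REQ_ALLOC_CACHE, 0);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	blk_mq_free_request(rq);
	return 0;
}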
@@ -154,10 +173,9 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
 	nr_pages = bio_max_segs(DIV_ROUND_UP(offset + len, PAGE_SIZE));
 
 	ret = -ENOMEM;
-	bio = bio_kmalloc(nr_pages, gfp_mask);
+	bio = blk_rq_map_bio_alloc(rq, nr_pages, gfp_mask);
 	if (!bio)
 		goto out_bmd;
-	bio_init_inline(bio, NULL, nr_pages, req_op(rq));
 
 	if (map_data) {
 		nr_pages = 1U << map_data->page_order;
@@ -233,43 +251,12 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
 cleanup:
 	if (!map_data)
 		bio_free_pages(bio);
-	bio_uninit(bio);
-	kfree(bio);
+	blk_mq_map_bio_put(bio);
 out_bmd:
 	kfree(bmd);
 	return ret;
 }
 
-static void blk_mq_map_bio_put(struct bio *bio)
-{
-	if (bio->bi_opf & REQ_ALLOC_CACHE) {
-		bio_put(bio);
-	} else {
-		bio_uninit(bio);
-		kfree(bio);
-	}
-}
-
-static struct bio *blk_rq_map_bio_alloc(struct request *rq,
-		unsigned int nr_vecs, gfp_t gfp_mask)
-{
-	struct block_device *bdev = rq->q->disk ? rq->q->disk->part0 : NULL;
-	struct bio *bio;
-
-	if (rq->cmd_flags & REQ_ALLOC_CACHE && (nr_vecs <= BIO_INLINE_VECS)) {
-		bio = bio_alloc_bioset(bdev, nr_vecs, rq->cmd_flags, gfp_mask,
-				       &fs_bio_set);
-		if (!bio)
-			return NULL;
-	} else {
-		bio = bio_kmalloc(nr_vecs, gfp_mask);
-		if (!bio)
-			return NULL;
-		bio_init_inline(bio, bdev, nr_vecs, req_op(rq));
-	}
-	return bio;
-}
-
 static int bio_map_user_iov(struct request *rq, struct iov_iter *iter,
 		gfp_t gfp_mask)
 {
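Two consequences of deleting the old pair are visible in the removed lines above: the nr_vecs <= BIO_INLINE_VECS ceiling on the bioset path is gone, since bio_alloc_bioset() can back larger bios from the bioset's bvec pool, and teardown no longer depends on how the bio was allocated. A minimal restatement (illustrative helper name):

static void map_teardown_example(struct bio *bio)
{
	/* Every mapping bio is bioset-backed now, so the final put frees it
	 * back to fs_bio_set (or the per-cpu cache when REQ_ALLOC_CACHE is
	 * set); no bio_uninit() + kfree() branch remains. */
	bio_put(bio);
}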
@@ -318,25 +305,23 @@ static void bio_invalidate_vmalloc_pages(struct bio *bio)
 static void bio_map_kern_endio(struct bio *bio)
 {
 	bio_invalidate_vmalloc_pages(bio);
-	bio_uninit(bio);
-	kfree(bio);
+	blk_mq_map_bio_put(bio);
 }
 
-static struct bio *bio_map_kern(void *data, unsigned int len, enum req_op op,
+static struct bio *bio_map_kern(struct request *rq, void *data, unsigned int len,
 		gfp_t gfp_mask)
 {
 	unsigned int nr_vecs = bio_add_max_vecs(data, len);
 	struct bio *bio;
 
-	bio = bio_kmalloc(nr_vecs, gfp_mask);
+	bio = blk_rq_map_bio_alloc(rq, nr_vecs, gfp_mask);
 	if (!bio)
 		return ERR_PTR(-ENOMEM);
-	bio_init_inline(bio, NULL, nr_vecs, op);
+
 	if (is_vmalloc_addr(data)) {
 		bio->bi_private = data;
 		if (!bio_add_vmalloc(bio, data, len)) {
-			bio_uninit(bio);
-			kfree(bio);
+			blk_mq_map_bio_put(bio);
 			return ERR_PTR(-EINVAL);
 		}
 	} else {
@@ -349,8 +334,7 @@ static struct bio *bio_map_kern(void *data, unsigned int len, enum req_op op,
 static void bio_copy_kern_endio(struct bio *bio)
 {
 	bio_free_pages(bio);
-	bio_uninit(bio);
-	kfree(bio);
+	blk_mq_map_bio_put(bio);
 }
 
 static void bio_copy_kern_endio_read(struct bio *bio)
@@ -369,6 +353,7 @@ static void bio_copy_kern_endio_read(struct bio *bio)
 /**
  * bio_copy_kern - copy kernel address into bio
+ * @rq: request to fill
  * @data: pointer to buffer to copy
  * @len: length in bytes
  * @op: bio/request operation
@@ -377,9 +362,10 @@ static void bio_copy_kern_endio_read(struct bio *bio)
  * copy the kernel address into a bio suitable for io to a block
  * device. Returns an error pointer in case of error.
  */
-static struct bio *bio_copy_kern(void *data, unsigned int len, enum req_op op,
+static struct bio *bio_copy_kern(struct request *rq, void *data, unsigned int len,
 		gfp_t gfp_mask)
 {
+	enum req_op op = req_op(rq);
 	unsigned long kaddr = (unsigned long)data;
 	unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	unsigned long start = kaddr >> PAGE_SHIFT;
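Passing the request down means the operation is derived rather than duplicated: req_op() is just the REQ_OP_MASK slice of rq->cmd_flags, the same flags blk_rq_map_bio_alloc() feeds to bio_alloc_bioset(), so the bio's op and the copy-direction logic in bio_copy_kern() can no longer disagree. A one-line restatement:

	enum req_op op = req_op(rq);	/* i.e. rq->cmd_flags & REQ_OP_MASK */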
@@ -394,10 +380,9 @@ static struct bio *bio_copy_kern(void *data, unsigned int len, enum req_op op,
 		return ERR_PTR(-EINVAL);
 
 	nr_pages = end - start;
-	bio = bio_kmalloc(nr_pages, gfp_mask);
+	bio = blk_rq_map_bio_alloc(rq, nr_pages, gfp_mask);
 	if (!bio)
 		return ERR_PTR(-ENOMEM);
-	bio_init_inline(bio, NULL, nr_pages, op);
 
 	while (len) {
 		struct page *page;
@@ -431,8 +416,7 @@ static struct bio *bio_copy_kern(void *data, unsigned int len, enum req_op op,
 cleanup:
 	bio_free_pages(bio);
-	bio_uninit(bio);
-	kfree(bio);
+	blk_mq_map_bio_put(bio);
 	return ERR_PTR(-ENOMEM);
 }
@@ -679,18 +663,16 @@ int blk_rq_map_kern(struct request *rq, void *kbuf, unsigned int len,
 		return -EINVAL;
 
 	if (!blk_rq_aligned(rq->q, addr, len) || object_is_on_stack(kbuf))
-		bio = bio_copy_kern(kbuf, len, req_op(rq), gfp_mask);
+		bio = bio_copy_kern(rq, kbuf, len, gfp_mask);
 	else
-		bio = bio_map_kern(kbuf, len, req_op(rq), gfp_mask);
+		bio = bio_map_kern(rq, kbuf, len, gfp_mask);
 
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);
 
 	ret = blk_rq_append_bio(rq, bio);
-	if (unlikely(ret)) {
-		bio_uninit(bio);
-		kfree(bio);
-	}
+	if (unlikely(ret))
+		blk_mq_map_bio_put(bio);
 	return ret;
 }
 EXPORT_SYMBOL(blk_rq_map_kern);
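For callers, nothing changes except the allocation source. A hedged usage sketch of the exported entry point (the driver-side helper name is illustrative):

static int submit_kbuf_example(struct request *rq, void *kbuf,
		unsigned int len)
{
	int ret;

	/* Picks bio_copy_kern() or bio_map_kern() internally; either way
	 * the bio now comes from fs_bio_set. */
	ret = blk_rq_map_kern(rq, kbuf, len, GFP_KERNEL);
	if (ret)
		return ret;

	blk_execute_rq_nowait(rq, false);
	return 0;
}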

drivers/nvme/host/ioctl.c

@@ -447,7 +447,7 @@ static int nvme_uring_cmd_io(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
 	struct iov_iter iter;
 	struct iov_iter *map_iter = NULL;
 	struct request *req;
-	blk_opf_t rq_flags = REQ_ALLOC_CACHE;
+	blk_opf_t rq_flags = 0;
 	blk_mq_req_flags_t blk_flags = 0;
 	int ret;
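With the block layer allocating mapping bios from fs_bio_set unconditionally, nvme no longer needs to request REQ_ALLOC_CACHE up front; per the commit message, actually enabling the cache for IRQ-driven and polled passthru follows in a later patch. For context, a sketch of the surrounding flag setup in nvme_uring_cmd_io as I read it (simplified and not part of this hunk; only the unconditional cache hint is removed here):

	blk_opf_t rq_flags = 0;
	blk_mq_req_flags_t blk_flags = 0;

	if (issue_flags & IO_URING_F_NONBLOCK) {
		rq_flags |= REQ_NOWAIT;
		blk_flags = BLK_MQ_REQ_NOWAIT;
	}
	if (issue_flags & IO_URING_F_IOPOLL)
		rq_flags |= REQ_POLLED;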