blk-mq-dma: move common dma start code to a helper

In preparing for dma mapping integrity metadata, move the common dma
setup to a helper.

Signed-off-by: Keith Busch <kbusch@kernel.org>
Reviewed-by: Kanchan Joshi <joshi.k@samsung.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Link: https://lore.kernel.org/r/20250813153153.3260897-6-kbusch@meta.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
pull/1354/merge
Keith Busch 2025-08-13 08:31:49 -07:00 committed by Jens Axboe
parent 7092639031
commit e2be2ba6d2
1 changed file with 33 additions and 26 deletions

View File

@ -141,35 +141,12 @@ static inline void blk_rq_map_iter_init(struct request *rq,
} }
} }
/** static bool blk_dma_map_iter_start(struct request *req, struct device *dma_dev,
* blk_rq_dma_map_iter_start - map the first DMA segment for a request struct dma_iova_state *state, struct blk_dma_iter *iter,
* @req: request to map unsigned int total_len)
* @dma_dev: device to map to
* @state: DMA IOVA state
* @iter: block layer DMA iterator
*
* Start DMA mapping @req to @dma_dev. @state and @iter are provided by the
* caller and don't need to be initialized. @state needs to be stored for use
* at unmap time, @iter is only needed at map time.
*
* Returns %false if there is no segment to map, including due to an error, or
 * %true if it did map a segment.
*
* If a segment was mapped, the DMA address for it is returned in @iter.addr and
* the length in @iter.len. If no segment was mapped the status code is
* returned in @iter.status.
*
* The caller can call blk_rq_dma_map_coalesce() to check if further segments
* need to be mapped after this, or go straight to blk_rq_dma_map_iter_next()
* to try to map the following segments.
*/
bool blk_rq_dma_map_iter_start(struct request *req, struct device *dma_dev,
struct dma_iova_state *state, struct blk_dma_iter *iter)
{ {
unsigned int total_len = blk_rq_payload_bytes(req);
struct phys_vec vec; struct phys_vec vec;
blk_rq_map_iter_init(req, &iter->iter);
memset(&iter->p2pdma, 0, sizeof(iter->p2pdma)); memset(&iter->p2pdma, 0, sizeof(iter->p2pdma));
iter->status = BLK_STS_OK; iter->status = BLK_STS_OK;
@ -201,6 +178,36 @@ bool blk_rq_dma_map_iter_start(struct request *req, struct device *dma_dev,
return blk_rq_dma_map_iova(req, dma_dev, state, iter, &vec); return blk_rq_dma_map_iova(req, dma_dev, state, iter, &vec);
return blk_dma_map_direct(req, dma_dev, iter, &vec); return blk_dma_map_direct(req, dma_dev, iter, &vec);
} }
/**
 * blk_rq_dma_map_iter_start - map the first DMA segment for a request
 * @req: request to map
 * @dma_dev: device to map to
 * @state: DMA IOVA state
 * @iter: block layer DMA iterator
 *
 * Start DMA mapping @req to @dma_dev. @state and @iter are provided by the
 * caller and don't need to be initialized. @state needs to be stored for use
 * at unmap time, @iter is only needed at map time.
 *
 * Returns %false if there is no segment to map, including due to an error, or
 * %true if it did map a segment.
 *
 * If a segment was mapped, the DMA address for it is returned in @iter.addr and
 * the length in @iter.len. If no segment was mapped the status code is
 * returned in @iter.status.
 *
 * The caller can call blk_rq_dma_map_coalesce() to check if further segments
 * need to be mapped after this, or go straight to blk_rq_dma_map_iter_next()
 * to try to map the following segments.
 */
bool blk_rq_dma_map_iter_start(struct request *req, struct device *dma_dev,
		struct dma_iova_state *state, struct blk_dma_iter *iter)
{
	/* Point the iterator at the request's data bios, then delegate the
	 * actual mapping to the common helper, bounded by the full payload
	 * length of the request.
	 */
	blk_rq_map_iter_init(req, &iter->iter);
	return blk_dma_map_iter_start(req, dma_dev, state, iter,
			blk_rq_payload_bytes(req));
}
EXPORT_SYMBOL_GPL(blk_rq_dma_map_iter_start); EXPORT_SYMBOL_GPL(blk_rq_dma_map_iter_start);
/** /**