io_uring/rw: defer reg buf vec import

Import registered buffers for vectored reads and writes at issue time,
as is now done for other fixed ops.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/e8491c976e4ab83a4e3dc428e9fe7555e59583b8.1741362889.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Author:    Pavel Begunkov, 2025-03-07 16:00:32 +00:00
Committer: Jens Axboe
commit 835c4bdf95, parent bdabba04bb
2 changed files with 37 additions and 8 deletions
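
The shape of the change, as a standalone C sketch (illustrative names only, not kernel code): prep no longer imports the registered buffer vector, it just marks the request, and the first issue performs the import and clears the mark so retries skip it:

#include <assert.h>

/* Toy model of the control flow this patch adopts; all names are
 * illustrative stand-ins for the kernel's request machinery. */
#define REQ_F_IMPORT_BUFFER	(1u << 0)

struct req {
	unsigned int flags;
	int imported;		/* stands in for a populated iov_iter */
};

/* prep: validate the SQE, but defer the buffer import */
static int prep(struct req *r)
{
	r->flags |= REQ_F_IMPORT_BUFFER;
	return 0;
}

/* issue: import once, then clear the flag so an -EAGAIN retry
 * does not import the same vector twice */
static int issue(struct req *r)
{
	if (r->flags & REQ_F_IMPORT_BUFFER) {
		r->imported = 1;	/* io_rw_import_reg_vec() here */
		r->flags &= ~REQ_F_IMPORT_BUFFER;
	}
	return r->imported ? 0 : -1;
}

int main(void)
{
	struct req r = { 0, 0 };

	prep(&r);
	assert(issue(&r) == 0);		/* first issue imports */
	assert(issue(&r) == 0);		/* retry skips the import */
	return 0;
}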

diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h

@@ -502,6 +502,7 @@ enum {
 	REQ_F_BUFFERS_COMMIT_BIT,
 	REQ_F_BUF_NODE_BIT,
 	REQ_F_HAS_METADATA_BIT,
+	REQ_F_IMPORT_BUFFER_BIT,
 
 	/* not a real bit, just to check we're not overflowing the space */
 	__REQ_F_LAST_BIT,
@@ -584,6 +585,8 @@ enum {
 	REQ_F_BUF_NODE		= IO_REQ_FLAG(REQ_F_BUF_NODE_BIT),
 	/* request has read/write metadata assigned */
 	REQ_F_HAS_METADATA	= IO_REQ_FLAG(REQ_F_HAS_METADATA_BIT),
+	/* resolve padded iovec to registered buffers */
+	REQ_F_IMPORT_BUFFER	= IO_REQ_FLAG(REQ_F_IMPORT_BUFFER_BIT),
 };
 
 typedef void (*io_req_tw_func_t)(struct io_kiocb *req, io_tw_token_t tw);
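
Each new REQ_F_*_BIT needs a matching mask in the second enum; IO_REQ_FLAG() turns the bit number into that mask (in the kernel it is a BIT_ULL() cast to io_req_flags_t). A minimal model of the pairing, with a simplified IO_REQ_FLAG():

#include <assert.h>

/* Simplified model of the _BIT/mask pairing above; the kernel's
 * IO_REQ_FLAG() casts BIT_ULL(bitno) to io_req_flags_t. */
#define IO_REQ_FLAG(bitno)	(1ull << (bitno))

enum {
	REQ_F_HAS_METADATA_BIT,
	REQ_F_IMPORT_BUFFER_BIT,
	__REQ_F_LAST_BIT,	/* only bounds-checks the flag space */
};

enum {
	REQ_F_HAS_METADATA	= IO_REQ_FLAG(REQ_F_HAS_METADATA_BIT),
	REQ_F_IMPORT_BUFFER	= IO_REQ_FLAG(REQ_F_IMPORT_BUFFER_BIT),
};

int main(void)
{
	/* each mask is the single bit named by its _BIT twin */
	assert(REQ_F_IMPORT_BUFFER == (1u << REQ_F_IMPORT_BUFFER_BIT));
	assert(!(REQ_F_IMPORT_BUFFER & REQ_F_HAS_METADATA));
	return 0;
}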

diff --git a/io_uring/rw.c b/io_uring/rw.c

@@ -381,7 +381,25 @@ int io_prep_write_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	return __io_prep_rw(req, sqe, ITER_SOURCE);
 }
 
-static int io_rw_prep_reg_vec(struct io_kiocb *req, int ddir)
+static int io_rw_import_reg_vec(struct io_kiocb *req,
+				struct io_async_rw *io,
+				int ddir, unsigned int issue_flags)
+{
+	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
+	unsigned uvec_segs = rw->len;
+	unsigned iovec_off = io->vec.nr - uvec_segs;
+	int ret;
+
+	ret = io_import_reg_vec(ddir, &io->iter, req, &io->vec,
+				uvec_segs, iovec_off, issue_flags);
+	if (unlikely(ret))
+		return ret;
+	iov_iter_save_state(&io->iter, &io->iter_state);
+	req->flags &= ~REQ_F_IMPORT_BUFFER;
+	return 0;
+}
+
+static int io_rw_prep_reg_vec(struct io_kiocb *req)
 {
 	struct io_rw *rw = io_kiocb_to_cmd(req, struct io_rw);
 	struct io_async_rw *io = req->async_data;
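
Note the iovec_off arithmetic: prep (next hunk) leaves the user iovecs parked at the tail of the shared io->vec allocation, and the deferred import recomputes that offset to find them while writing resolved entries from the head. A sketch of just that layout math (illustrative, assuming the tail placement used by this series):

#include <stdio.h>

/* Illustrative layout math behind "iovec_off = io->vec.nr - uvec_segs":
 * one allocation backs both the resolved entries, built from the head,
 * and the user iovecs, parked at the tail by prep. */
int main(void)
{
	unsigned int vec_nr = 8;	/* total slots in the io->vec cache */
	unsigned int uvec_segs = 3;	/* user segments, from rw->len */
	unsigned int iovec_off = vec_nr - uvec_segs;

	printf("resolved entries grow from slot 0 up to slot %u\n", iovec_off);
	printf("parked user iovecs occupy slots [%u, %u)\n", iovec_off, vec_nr);
	return 0;
}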
@@ -406,10 +424,8 @@ static int io_rw_prep_reg_vec(struct io_kiocb *req, int ddir)
 	if (IS_ERR(res))
 		return PTR_ERR(res);
 
-	ret = io_import_reg_vec(ddir, &io->iter, req, &io->vec,
-				uvec_segs, iovec_off, 0);
-	iov_iter_save_state(&io->iter, &io->iter_state);
-	return ret;
+	req->flags |= REQ_F_IMPORT_BUFFER;
+	return 0;
 }
 
 int io_prep_readv_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
@@ -419,7 +435,7 @@ int io_prep_readv_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	ret = __io_prep_rw(req, sqe, ITER_DEST);
 	if (unlikely(ret))
 		return ret;
-	return io_rw_prep_reg_vec(req, ITER_DEST);
+	return io_rw_prep_reg_vec(req);
 }
 
 int io_prep_writev_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
@@ -429,7 +445,7 @@ int io_prep_writev_fixed(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	ret = __io_prep_rw(req, sqe, ITER_SOURCE);
 	if (unlikely(ret))
 		return ret;
-	return io_rw_prep_reg_vec(req, ITER_SOURCE);
+	return io_rw_prep_reg_vec(req);
 }
 
 /*
@@ -906,7 +922,11 @@ static int __io_read(struct io_kiocb *req, unsigned int issue_flags)
 	ssize_t ret;
 	loff_t *ppos;
 
-	if (io_do_buffer_select(req)) {
+	if (req->flags & REQ_F_IMPORT_BUFFER) {
+		ret = io_rw_import_reg_vec(req, io, ITER_DEST, issue_flags);
+		if (unlikely(ret))
+			return ret;
+	} else if (io_do_buffer_select(req)) {
 		ret = io_import_rw_buffer(ITER_DEST, req, io, issue_flags);
 		if (unlikely(ret < 0))
 			return ret;
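
iov_iter_save_state() runs right after the import (see io_rw_import_reg_vec() above) so the generic rw path can rewind the iterator before retrying a short or -EAGAIN'd transfer, and clearing REQ_F_IMPORT_BUFFER in the same place keeps those retries from importing again. A toy model of that save/restore contract (not the kernel API):

#include <assert.h>

/* Toy pairing modelled on iov_iter_save_state()/iov_iter_restore():
 * save a cursor once after import, restore it before any retry. */
struct iter { unsigned long consumed; };
struct iter_state { unsigned long consumed; };

static void save_state(const struct iter *it, struct iter_state *st)
{
	st->consumed = it->consumed;
}

static void restore(struct iter *it, const struct iter_state *st)
{
	it->consumed = st->consumed;
}

int main(void)
{
	struct iter it = { 0 };
	struct iter_state st;

	save_state(&it, &st);	/* right after the import, as above */
	it.consumed += 512;	/* partial progress before -EAGAIN */
	restore(&it, &st);	/* retry sees a freshly imported iterator */
	assert(it.consumed == 0);
	return 0;
}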
@@ -1117,6 +1137,12 @@ int io_write(struct io_kiocb *req, unsigned int issue_flags)
 	ssize_t ret, ret2;
 	loff_t *ppos;
 
+	if (req->flags & REQ_F_IMPORT_BUFFER) {
+		ret = io_rw_import_reg_vec(req, io, ITER_SOURCE, issue_flags);
+		if (unlikely(ret))
+			return ret;
+	}
+
 	ret = io_rw_init_file(req, FMODE_WRITE, WRITE);
 	if (unlikely(ret))
 		return ret;
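
For reference, a hedged userspace sketch of submitting one of these requests with liburing, assuming headers new enough to carry this series' IORING_OP_READV_FIXED (error handling trimmed; newer liburing may grow a dedicated prep helper, so the raw io_uring_prep_rw() fill-in here is just one way to build the SQE):

#include <liburing.h>
#include <fcntl.h>
#include <stdlib.h>

int main(void)
{
	struct io_uring ring;
	struct io_uring_sqe *sqe;
	struct io_uring_cqe *cqe;
	char *buf = malloc(8192);
	/* the whole region is registered as fixed buffer index 0 */
	struct iovec reg = { .iov_base = buf, .iov_len = 8192 };
	/* the vectored request addresses ranges inside that region */
	struct iovec vecs[2] = {
		{ .iov_base = buf,        .iov_len = 4096 },
		{ .iov_base = buf + 4096, .iov_len = 4096 },
	};
	int fd = open("/etc/hostname", O_RDONLY);

	io_uring_queue_init(8, &ring, 0);
	io_uring_register_buffers(&ring, &reg, 1);

	sqe = io_uring_get_sqe(&ring);
	/* len carries the number of iovec segments, as with plain readv */
	io_uring_prep_rw(IORING_OP_READV_FIXED, sqe, fd, vecs, 2, 0);
	sqe->buf_index = 0;	/* registered buffer to resolve against */

	io_uring_submit(&ring);
	io_uring_wait_cqe(&ring, &cqe);
	io_uring_cqe_seen(&ring, cqe);
	io_uring_queue_exit(&ring);
	free(buf);
	return 0;
}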