io_uring/kbuf: pass in struct io_buffer_list to commit/recycle helpers

Rather than have the buffer list implied by being stored in the io_kiocb,
pass it in directly so it's immediately obvious where these users of
->buf_list get it from.

Link: https://lore.kernel.org/r/20250821020750.598432-6-axboe@kernel.dk
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Author: Jens Axboe <axboe@kernel.dk>
Date:   2025-08-20 20:03:33 -06:00
parent: b22743f29b
commit: 1b5add75d7
6 changed files with 45 additions and 40 deletions
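
The change is mechanical at each call site: helpers that used to fetch
req->buf_list themselves now take the buffer list as an argument, and every
caller in this patch passes req->buf_list explicitly. A condensed
before/after sketch of the call-site pattern (not a literal hunk from the
diff):

	/* before: the helpers reach into req->buf_list internally */
	cflags = io_put_kbuf(req, len);
	io_kbuf_recycle(req, issue_flags);

	/* after: the buffer list is named at the call site */
	cflags = io_put_kbuf(req, len, req->buf_list);
	io_kbuf_recycle(req, req->buf_list, issue_flags);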

diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1007,7 +1007,7 @@ void io_req_defer_failed(struct io_kiocb *req, s32 res)
 	lockdep_assert_held(&req->ctx->uring_lock);
 
 	req_set_fail(req);
-	io_req_set_res(req, res, io_put_kbuf(req, res));
+	io_req_set_res(req, res, io_put_kbuf(req, res, req->buf_list));
 	if (def->fail)
 		def->fail(req);
 	io_req_complete_defer(req);
@@ -2025,11 +2025,11 @@ fail:
 	switch (io_arm_poll_handler(req, 0)) {
 	case IO_APOLL_READY:
-		io_kbuf_recycle(req, 0);
+		io_kbuf_recycle(req, req->buf_list, 0);
 		io_req_task_queue(req);
 		break;
 	case IO_APOLL_ABORTED:
-		io_kbuf_recycle(req, 0);
+		io_kbuf_recycle(req, req->buf_list, 0);
 		io_queue_iowq(req);
 		break;
 	case IO_APOLL_OK:

diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -354,9 +354,9 @@ int io_buffers_peek(struct io_kiocb *req, struct buf_sel_arg *arg)
 	return io_provided_buffers_select(req, &arg->max_len, bl, arg->iovs);
 }
 
-static inline bool __io_put_kbuf_ring(struct io_kiocb *req, int len, int nr)
+static inline bool __io_put_kbuf_ring(struct io_kiocb *req,
+				      struct io_buffer_list *bl, int len, int nr)
 {
-	struct io_buffer_list *bl = req->buf_list;
 	bool ret = true;
 
 	if (bl)
@@ -366,7 +366,8 @@ static inline bool __io_put_kbuf_ring(struct io_kiocb *req, int len, int nr)
 	return ret;
 }
 
-unsigned int __io_put_kbufs(struct io_kiocb *req, int len, int nbufs)
+unsigned int __io_put_kbufs(struct io_kiocb *req, struct io_buffer_list *bl,
+			    int len, int nbufs)
 {
 	unsigned int ret;
 
@@ -377,7 +378,7 @@ unsigned int __io_put_kbufs(struct io_kiocb *req, int len, int nbufs)
 		return ret;
 	}
 
-	if (!__io_put_kbuf_ring(req, len, nbufs))
+	if (!__io_put_kbuf_ring(req, bl, len, nbufs))
 		ret |= IORING_CQE_F_BUF_MORE;
 	return ret;
 }

diff --git a/io_uring/kbuf.h b/io_uring/kbuf.h
--- a/io_uring/kbuf.h
+++ b/io_uring/kbuf.h
@@ -80,14 +80,16 @@ int io_register_pbuf_status(struct io_ring_ctx *ctx, void __user *arg);
 bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags);
 void io_kbuf_drop_legacy(struct io_kiocb *req);
 
-unsigned int __io_put_kbufs(struct io_kiocb *req, int len, int nbufs);
+unsigned int __io_put_kbufs(struct io_kiocb *req, struct io_buffer_list *bl,
+			    int len, int nbufs);
 bool io_kbuf_commit(struct io_kiocb *req,
 		    struct io_buffer_list *bl, int len, int nr);
 
 struct io_mapped_region *io_pbuf_get_region(struct io_ring_ctx *ctx,
 					    unsigned int bgid);
 
-static inline bool io_kbuf_recycle_ring(struct io_kiocb *req)
+static inline bool io_kbuf_recycle_ring(struct io_kiocb *req,
+					struct io_buffer_list *bl)
 {
 	/*
 	 * We don't need to recycle for REQ_F_BUFFER_RING, we can just clear
@@ -96,7 +98,7 @@ static inline bool io_kbuf_recycle_ring(struct io_kiocb *req)
 	 * The exception is partial io, that case we should increment bl->head
 	 * to monopolize the buffer.
 	 */
-	if (req->buf_list) {
+	if (bl) {
 		req->flags &= ~(REQ_F_BUFFER_RING|REQ_F_BUFFERS_COMMIT);
 		return true;
 	}
@@ -110,29 +112,31 @@ static inline bool io_do_buffer_select(struct io_kiocb *req)
 	return !(req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING));
 }
 
-static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
+static inline bool io_kbuf_recycle(struct io_kiocb *req, struct io_buffer_list *bl,
+				   unsigned issue_flags)
 {
 	if (req->flags & REQ_F_BL_NO_RECYCLE)
 		return false;
 	if (req->flags & REQ_F_BUFFER_SELECTED)
 		return io_kbuf_recycle_legacy(req, issue_flags);
 	if (req->flags & REQ_F_BUFFER_RING)
-		return io_kbuf_recycle_ring(req);
+		return io_kbuf_recycle_ring(req, bl);
 	return false;
 }
 
-static inline unsigned int io_put_kbuf(struct io_kiocb *req, int len)
+static inline unsigned int io_put_kbuf(struct io_kiocb *req, int len,
+				       struct io_buffer_list *bl)
 {
 	if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED)))
 		return 0;
-	return __io_put_kbufs(req, len, 1);
+	return __io_put_kbufs(req, bl, len, 1);
 }
 
 static inline unsigned int io_put_kbufs(struct io_kiocb *req, int len,
-					int nbufs)
+					struct io_buffer_list *bl, int nbufs)
 {
 	if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED)))
 		return 0;
-	return __io_put_kbufs(req, len, nbufs);
+	return __io_put_kbufs(req, bl, len, nbufs);
 }
 #endif
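
The inline helpers above now form a thin dispatch layer over the explicit
list: io_put_kbuf() and io_put_kbufs() forward to __io_put_kbufs(), and
io_kbuf_recycle() routes ring-provided buffers to io_kbuf_recycle_ring().
As a sketch, a hypothetical opcode completion (my_handler_complete is
illustrative only, not part of this patch) would release its buffer like so:

	/* hypothetical completion path using the reworked helpers */
	static void my_handler_complete(struct io_kiocb *req, int res)
	{
		/* release the selected buffer, naming the list explicitly */
		unsigned int cflags = io_put_kbuf(req, res, req->buf_list);

		io_req_set_res(req, res, cflags);
	}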

diff --git a/io_uring/net.c b/io_uring/net.c
--- a/io_uring/net.c
+++ b/io_uring/net.c
@@ -494,12 +494,12 @@ static int io_bundle_nbufs(struct io_async_msghdr *kmsg, int ret)
 	return nbufs;
 }
 
-static int io_net_kbuf_recyle(struct io_kiocb *req,
+static int io_net_kbuf_recyle(struct io_kiocb *req, struct io_buffer_list *bl,
 			      struct io_async_msghdr *kmsg, int len)
 {
 	req->flags |= REQ_F_BL_NO_RECYCLE;
 	if (req->flags & REQ_F_BUFFERS_COMMIT)
-		io_kbuf_commit(req, req->buf_list, len, io_bundle_nbufs(kmsg, len));
+		io_kbuf_commit(req, bl, len, io_bundle_nbufs(kmsg, len));
 	return IOU_RETRY;
 }
@@ -511,11 +511,11 @@ static inline bool io_send_finish(struct io_kiocb *req, int *ret,
 	unsigned int cflags;
 
 	if (!(sr->flags & IORING_RECVSEND_BUNDLE)) {
-		cflags = io_put_kbuf(req, *ret);
+		cflags = io_put_kbuf(req, *ret, req->buf_list);
 		goto finish;
 	}
 
-	cflags = io_put_kbufs(req, *ret, io_bundle_nbufs(kmsg, *ret));
+	cflags = io_put_kbufs(req, *ret, req->buf_list, io_bundle_nbufs(kmsg, *ret));
 
 	if (bundle_finished || req->flags & REQ_F_BL_EMPTY)
 		goto finish;
@@ -681,7 +681,7 @@ retry_bundle:
 			sr->len -= ret;
 			sr->buf += ret;
 			sr->done_io += ret;
-			return io_net_kbuf_recyle(req, kmsg, ret);
+			return io_net_kbuf_recyle(req, req->buf_list, kmsg, ret);
 		}
 		if (ret == -ERESTARTSYS)
 			ret = -EINTR;
@@ -871,7 +871,7 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 		if (sr->flags & IORING_RECVSEND_BUNDLE) {
 			size_t this_ret = *ret - sr->done_io;
 
-			cflags |= io_put_kbufs(req, this_ret, io_bundle_nbufs(kmsg, this_ret));
+			cflags |= io_put_kbufs(req, this_ret, req->buf_list, io_bundle_nbufs(kmsg, this_ret));
 			if (sr->flags & IORING_RECV_RETRY)
 				cflags = req->cqe.flags | (cflags & CQE_F_MASK);
 			if (sr->mshot_len && *ret >= sr->mshot_len)
@@ -893,7 +893,7 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 			return false;
 		}
 	} else {
-		cflags |= io_put_kbuf(req, *ret);
+		cflags |= io_put_kbuf(req, *ret, req->buf_list);
 	}
 
 	/*
@@ -1045,7 +1045,7 @@ retry_multishot:
 	if (req->flags & REQ_F_APOLL_MULTISHOT) {
 		ret = io_recvmsg_prep_multishot(kmsg, sr, &buf, &len);
 		if (ret) {
-			io_kbuf_recycle(req, issue_flags);
+			io_kbuf_recycle(req, req->buf_list, issue_flags);
 			return ret;
 		}
 	}
@@ -1070,13 +1070,13 @@ retry_multishot:
 	if (ret < min_ret) {
 		if (ret == -EAGAIN && force_nonblock) {
 			if (issue_flags & IO_URING_F_MULTISHOT)
-				io_kbuf_recycle(req, issue_flags);
+				io_kbuf_recycle(req, req->buf_list, issue_flags);
 
 			return IOU_RETRY;
 		}
 		if (ret > 0 && io_net_retry(sock, flags)) {
 			sr->done_io += ret;
-			return io_net_kbuf_recyle(req, kmsg, ret);
+			return io_net_kbuf_recyle(req, req->buf_list, kmsg, ret);
 		}
 		if (ret == -ERESTARTSYS)
 			ret = -EINTR;
@@ -1090,7 +1090,7 @@ retry_multishot:
 	else if (sr->done_io)
 		ret = sr->done_io;
 	else
-		io_kbuf_recycle(req, issue_flags);
+		io_kbuf_recycle(req, req->buf_list, issue_flags);
 
 	if (!io_recv_finish(req, &ret, kmsg, mshot_finished, issue_flags))
 		goto retry_multishot;
@@ -1214,7 +1214,7 @@ retry_multishot:
 	if (ret < min_ret) {
 		if (ret == -EAGAIN && force_nonblock) {
 			if (issue_flags & IO_URING_F_MULTISHOT)
-				io_kbuf_recycle(req, issue_flags);
+				io_kbuf_recycle(req, req->buf_list, issue_flags);
 
 			return IOU_RETRY;
 		}
@@ -1222,7 +1222,7 @@ retry_multishot:
 			sr->len -= ret;
 			sr->buf += ret;
 			sr->done_io += ret;
-			return io_net_kbuf_recyle(req, kmsg, ret);
+			return io_net_kbuf_recyle(req, req->buf_list, kmsg, ret);
 		}
 		if (ret == -ERESTARTSYS)
 			ret = -EINTR;
@@ -1238,7 +1238,7 @@ out_free:
 	else if (sr->done_io)
 		ret = sr->done_io;
 	else
-		io_kbuf_recycle(req, issue_flags);
+		io_kbuf_recycle(req, req->buf_list, issue_flags);
 
 	if (!io_recv_finish(req, &ret, kmsg, mshot_finished, issue_flags))
 		goto retry_multishot;
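
Every net.c conversion in this patch passes req->buf_list, so behavior is
unchanged; the gain is that each user of the ring buffer list is now visible
at the call site. The short-transfer retry pattern repeated across the send
and recv paths condenses to:

	/* short transfer: record progress; io_net_kbuf_recyle() commits
	 * any consumed bundle buffers, blocks further recycling, and
	 * returns IOU_RETRY
	 */
	if (ret > 0 && io_net_retry(sock, flags)) {
		sr->done_io += ret;
		return io_net_kbuf_recyle(req, req->buf_list, kmsg, ret);
	}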

diff --git a/io_uring/poll.c b/io_uring/poll.c
--- a/io_uring/poll.c
+++ b/io_uring/poll.c
@@ -316,10 +316,10 @@ void io_poll_task_func(struct io_kiocb *req, io_tw_token_t tw)
 
 	ret = io_poll_check_events(req, tw);
 	if (ret == IOU_POLL_NO_ACTION) {
-		io_kbuf_recycle(req, 0);
+		io_kbuf_recycle(req, req->buf_list, 0);
 		return;
 	} else if (ret == IOU_POLL_REQUEUE) {
-		io_kbuf_recycle(req, 0);
+		io_kbuf_recycle(req, req->buf_list, 0);
 		__io_poll_execute(req, 0);
 		return;
 	}
@@ -686,7 +686,7 @@ int io_arm_apoll(struct io_kiocb *req, unsigned issue_flags, __poll_t mask)
 	req->flags |= REQ_F_POLLED;
 	ipt.pt._qproc = io_async_queue_proc;
 
-	io_kbuf_recycle(req, issue_flags);
+	io_kbuf_recycle(req, req->buf_list, issue_flags);
 
 	ret = __io_arm_poll_handler(req, &apoll->poll, &ipt, mask, issue_flags);
 	if (ret)

diff --git a/io_uring/rw.c b/io_uring/rw.c
--- a/io_uring/rw.c
+++ b/io_uring/rw.c
@@ -576,7 +576,7 @@ void io_req_rw_complete(struct io_kiocb *req, io_tw_token_t tw)
 	io_req_io_end(req);
 
 	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING))
-		req->cqe.flags |= io_put_kbuf(req, req->cqe.res);
+		req->cqe.flags |= io_put_kbuf(req, req->cqe.res, req->buf_list);
 
 	io_req_rw_cleanup(req, 0);
 	io_req_task_complete(req, tw);
@@ -659,7 +659,7 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
 		 * from the submission path.
 		 */
		io_req_io_end(req);
-		io_req_set_res(req, final_ret, io_put_kbuf(req, ret));
+		io_req_set_res(req, final_ret, io_put_kbuf(req, ret, req->buf_list));
 		io_req_rw_cleanup(req, issue_flags);
 		return IOU_COMPLETE;
 	} else {
@@ -1049,15 +1049,15 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
 		 * Reset rw->len to 0 again to avoid clamping future mshot
 		 * reads, in case the buffer size varies.
 		 */
-		if (io_kbuf_recycle(req, issue_flags))
+		if (io_kbuf_recycle(req, req->buf_list, issue_flags))
 			rw->len = 0;
 		return IOU_RETRY;
 	} else if (ret <= 0) {
-		io_kbuf_recycle(req, issue_flags);
+		io_kbuf_recycle(req, req->buf_list, issue_flags);
 		if (ret < 0)
 			req_set_fail(req);
 	} else if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
-		cflags = io_put_kbuf(req, ret);
+		cflags = io_put_kbuf(req, ret, req->buf_list);
 	} else {
 		/*
 		 * Any successful return value will keep the multishot read
@@ -1065,7 +1065,7 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
 		 * we fail to post a CQE, or multishot is no longer set, then
 		 * jump to the termination path. This request is then done.
 		 */
-		cflags = io_put_kbuf(req, ret);
+		cflags = io_put_kbuf(req, ret, req->buf_list);
 		rw->len = 0; /* similarly to above, reset len to 0 */
 
 		if (io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
@@ -1362,7 +1362,7 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 		if (!smp_load_acquire(&req->iopoll_completed))
 			break;
 		nr_events++;
-		req->cqe.flags = io_put_kbuf(req, req->cqe.res);
+		req->cqe.flags = io_put_kbuf(req, req->cqe.res, req->buf_list);
 		if (req->opcode != IORING_OP_URING_CMD)
 			io_req_rw_cleanup(req, 0);
 	}