io_uring/kbuf: drop 'issue_flags' from io_put_kbuf(s)() arguments

Picking multiple buffers always requires the ring lock to be held across
the operation, so there's no need to pass in the issue_flags to
io_put_kbufs(). On the single buffer side, if the initial picking of a
ring buffer was unlocked, then it will have been committed already. For
legacy buffers, no locking is required, as they will simply be freed.

Link: https://lore.kernel.org/r/20250821020750.598432-3-axboe@kernel.dk
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Jens Axboe 2025-08-20 20:03:30 -06:00
parent ab3ea6eac5
commit 5e73b402cb
4 changed files with 14 additions and 17 deletions
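
In short, every call site collapses to the pattern below. This is a minimal before/after sketch assembled from the hunks that follow, not code from any one file; cflags, ret and nbufs stand in for whatever the individual call sites use.

	/* Before: callers threaded issue_flags through only so the kbuf code
	 * could tell whether the ring lock was held at buffer-pick time. */
	cflags = io_put_kbuf(req, ret, issue_flags);
	cflags = io_put_kbufs(req, ret, nbufs, issue_flags);

	/* After: the lock state is implied by the invariants in the commit
	 * message (multi-buffer puts hold the ring lock; a ring buffer picked
	 * unlocked has already been committed; legacy buffers are just freed),
	 * so the helpers only need the request, length and buffer count. */
	cflags = io_put_kbuf(req, ret);
	cflags = io_put_kbufs(req, ret, nbufs);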

io_uring/io_uring.c

@@ -1007,7 +1007,7 @@ void io_req_defer_failed(struct io_kiocb *req, s32 res)
 	lockdep_assert_held(&req->ctx->uring_lock);
 
 	req_set_fail(req);
-	io_req_set_res(req, res, io_put_kbuf(req, res, IO_URING_F_UNLOCKED));
+	io_req_set_res(req, res, io_put_kbuf(req, res));
 	if (def->fail)
 		def->fail(req);
 	io_req_complete_defer(req);

io_uring/kbuf.h

@@ -121,8 +121,7 @@ static inline bool io_kbuf_recycle(struct io_kiocb *req, unsigned issue_flags)
 	return false;
 }
 
-static inline unsigned int io_put_kbuf(struct io_kiocb *req, int len,
-				       unsigned issue_flags)
+static inline unsigned int io_put_kbuf(struct io_kiocb *req, int len)
 {
 	if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED)))
 		return 0;
@@ -130,7 +129,7 @@ static inline unsigned int io_put_kbuf(struct io_kiocb *req, int len,
 }
 
 static inline unsigned int io_put_kbufs(struct io_kiocb *req, int len,
-					int nbufs, unsigned issue_flags)
+					int nbufs)
 {
 	if (!(req->flags & (REQ_F_BUFFER_RING | REQ_F_BUFFER_SELECTED)))
 		return 0;

io_uring/net.c

@@ -504,19 +504,18 @@ static int io_net_kbuf_recyle(struct io_kiocb *req,
 }
 
 static inline bool io_send_finish(struct io_kiocb *req, int *ret,
-				  struct io_async_msghdr *kmsg,
-				  unsigned issue_flags)
+				  struct io_async_msghdr *kmsg)
 {
 	struct io_sr_msg *sr = io_kiocb_to_cmd(req, struct io_sr_msg);
 	bool bundle_finished = *ret <= 0;
 	unsigned int cflags;
 
 	if (!(sr->flags & IORING_RECVSEND_BUNDLE)) {
-		cflags = io_put_kbuf(req, *ret, issue_flags);
+		cflags = io_put_kbuf(req, *ret);
 		goto finish;
 	}
 
-	cflags = io_put_kbufs(req, *ret, io_bundle_nbufs(kmsg, *ret), issue_flags);
+	cflags = io_put_kbufs(req, *ret, io_bundle_nbufs(kmsg, *ret));
 
 	if (bundle_finished || req->flags & REQ_F_BL_EMPTY)
 		goto finish;
@@ -693,7 +692,7 @@ retry_bundle:
 	else if (sr->done_io)
 		ret = sr->done_io;
 
-	if (!io_send_finish(req, &ret, kmsg, issue_flags))
+	if (!io_send_finish(req, &ret, kmsg))
 		goto retry_bundle;
 
 	io_req_msg_cleanup(req, issue_flags);
@@ -872,8 +871,7 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 	if (sr->flags & IORING_RECVSEND_BUNDLE) {
 		size_t this_ret = *ret - sr->done_io;
 
-		cflags |= io_put_kbufs(req, this_ret, io_bundle_nbufs(kmsg, this_ret),
-				       issue_flags);
+		cflags |= io_put_kbufs(req, this_ret, io_bundle_nbufs(kmsg, this_ret));
 		if (sr->flags & IORING_RECV_RETRY)
 			cflags = req->cqe.flags | (cflags & CQE_F_MASK);
 		if (sr->mshot_len && *ret >= sr->mshot_len)
@@ -895,7 +893,7 @@ static inline bool io_recv_finish(struct io_kiocb *req, int *ret,
 			return false;
 		}
 	} else {
-		cflags |= io_put_kbuf(req, *ret, issue_flags);
+		cflags |= io_put_kbuf(req, *ret);
 	}
 
 	/*

io_uring/rw.c

@@ -576,7 +576,7 @@ void io_req_rw_complete(struct io_kiocb *req, io_tw_token_t tw)
 	io_req_io_end(req);
 
 	if (req->flags & (REQ_F_BUFFER_SELECTED|REQ_F_BUFFER_RING))
-		req->cqe.flags |= io_put_kbuf(req, req->cqe.res, 0);
+		req->cqe.flags |= io_put_kbuf(req, req->cqe.res);
 
 	io_req_rw_cleanup(req, 0);
 	io_req_task_complete(req, tw);
@@ -659,7 +659,7 @@ static int kiocb_done(struct io_kiocb *req, ssize_t ret,
 		 * from the submission path.
 		 */
 		io_req_io_end(req);
-		io_req_set_res(req, final_ret, io_put_kbuf(req, ret, issue_flags));
+		io_req_set_res(req, final_ret, io_put_kbuf(req, ret));
 		io_req_rw_cleanup(req, issue_flags);
 		return IOU_COMPLETE;
 	} else {
@@ -1057,7 +1057,7 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
 		if (ret < 0)
 			req_set_fail(req);
 	} else if (!(req->flags & REQ_F_APOLL_MULTISHOT)) {
-		cflags = io_put_kbuf(req, ret, issue_flags);
+		cflags = io_put_kbuf(req, ret);
 	} else {
 		/*
 		 * Any successful return value will keep the multishot read
@@ -1065,7 +1065,7 @@ int io_read_mshot(struct io_kiocb *req, unsigned int issue_flags)
 		 * we fail to post a CQE, or multishot is no longer set, then
 		 * jump to the termination path. This request is then done.
 		 */
-		cflags = io_put_kbuf(req, ret, issue_flags);
+		cflags = io_put_kbuf(req, ret);
 		rw->len = 0; /* similarly to above, reset len to 0 */
 
 		if (io_req_post_cqe(req, ret, cflags | IORING_CQE_F_MORE)) {
@@ -1362,7 +1362,7 @@ int io_do_iopoll(struct io_ring_ctx *ctx, bool force_nonspin)
 		if (!smp_load_acquire(&req->iopoll_completed))
			break;
 		nr_events++;
-		req->cqe.flags = io_put_kbuf(req, req->cqe.res, 0);
+		req->cqe.flags = io_put_kbuf(req, req->cqe.res);
 		if (req->opcode != IORING_OP_URING_CMD)
			io_req_rw_cleanup(req, 0);
	}