io_uring: open code io_req_cqe_overflow()

A preparation patch; just open code io_req_cqe_overflow().

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Reviewed-by: Caleb Sander Mateos <csander@purestorage.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Pavel Begunkov 2025-05-14 09:07:20 +01:00 committed by Jens Axboe
parent 16256648cd
commit 5288b9e28f
1 changed file with 10 additions and 10 deletions
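
For context, a minimal userspace sketch of the refactor pattern this patch applies (all names below are toy stand-ins, not the kernel code): a thin wrapper that marshals a request's CQE fields and clears its big_cqe state is dissolved into its call sites, with the clearing hoisted out of the branches, mirroring the diff below.

#include <stdio.h>
#include <string.h>

/* Toy stand-ins for the kernel types; every name here is illustrative. */
struct toy_big_cqe { unsigned long extra1, extra2; };
struct toy_req {
	unsigned long user_data;
	int res, flags;
	struct toy_big_cqe big_cqe;
};

/* Stand-in for io_cqring_event_overflow(): records the overflowed CQE. */
static void toy_event_overflow(unsigned long user_data, int res, int flags,
			       unsigned long extra1, unsigned long extra2)
{
	printf("overflow cqe: data=%lu res=%d flags=%d extra=%lu/%lu\n",
	       user_data, res, flags, extra1, extra2);
}

/* Before: the helper both emits the overflow entry and clears big_cqe. */
static void toy_req_overflow(struct toy_req *req)
{
	toy_event_overflow(req->user_data, req->res, req->flags,
			   req->big_cqe.extra1, req->big_cqe.extra2);
	memset(&req->big_cqe, 0, sizeof(req->big_cqe));
}

int main(void)
{
	struct toy_req req = { .user_data = 42, .res = 0, .flags = 0,
			       .big_cqe = { 1, 2 } };
	int locked_path = 1;

	/* After: the helper body is open coded in each branch, and the
	 * memset runs once after the if/else instead of inside the helper. */
	if (locked_path) {
		/* a lock would be taken here */
		toy_event_overflow(req.user_data, req.res, req.flags,
				   req.big_cqe.extra1, req.big_cqe.extra2);
		/* the lock would be dropped here */
	} else {
		toy_event_overflow(req.user_data, req.res, req.flags,
				   req.big_cqe.extra1, req.big_cqe.extra2);
	}
	memset(&req.big_cqe, 0, sizeof(req.big_cqe));

	(void)toy_req_overflow; /* "before" form kept only for comparison */
	return 0;
}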

@@ -739,14 +739,6 @@ static bool io_cqring_event_overflow(struct io_ring_ctx *ctx, u64 user_data,
 	return true;
 }
 
-static void io_req_cqe_overflow(struct io_kiocb *req)
-{
-	io_cqring_event_overflow(req->ctx, req->cqe.user_data,
-				req->cqe.res, req->cqe.flags,
-				req->big_cqe.extra1, req->big_cqe.extra2);
-	memset(&req->big_cqe, 0, sizeof(req->big_cqe));
-}
-
 /*
  * writes to the cq entry need to come after reading head; the
  * control dependency is enough as we're using WRITE_ONCE to
@@ -1435,11 +1427,19 @@ void __io_submit_flush_completions(struct io_ring_ctx *ctx)
 		    unlikely(!io_fill_cqe_req(ctx, req))) {
 			if (ctx->lockless_cq) {
 				spin_lock(&ctx->completion_lock);
-				io_req_cqe_overflow(req);
+				io_cqring_event_overflow(req->ctx, req->cqe.user_data,
+							req->cqe.res, req->cqe.flags,
+							req->big_cqe.extra1,
+							req->big_cqe.extra2);
 				spin_unlock(&ctx->completion_lock);
 			} else {
-				io_req_cqe_overflow(req);
+				io_cqring_event_overflow(req->ctx, req->cqe.user_data,
+							req->cqe.res, req->cqe.flags,
+							req->big_cqe.extra1,
+							req->big_cqe.extra2);
 			}
+			memset(&req->big_cqe, 0, sizeof(req->big_cqe));
 		}
 	}
 	__io_cq_unlock_post(ctx);
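
One design note on the open-coded form: the memset(&req->big_cqe, 0, sizeof(req->big_cqe)) that used to live inside io_req_cqe_overflow() is hoisted after the if/else, so big_cqe is cleared exactly once per overflowed request, regardless of whether the lockless_cq path had to take completion_lock around io_cqring_event_overflow().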