io_uring/net: set sg_from_iter in advance

In preparation for the next patch, set the ->sg_from_iter callback at
request prep time.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Link: https://lore.kernel.org/r/5fe2972701df3bacdb3d760bce195fa640bee201.1743202294.git.asml.silence@gmail.com
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Pavel Begunkov 2025-03-28 23:10:59 +00:00 committed by Jens Axboe
parent 49dbce5602
commit ad3f6cc400
1 changed file with 15 additions and 10 deletions
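Before the diff, a minimal userspace sketch of the idea in plain C. All names here (fake_req, fake_prep, sg_from_fixed, ...) are illustrative stand-ins, not the real io_uring types: the request records which scatter-gather import callback to use, and that choice is made once at prep time from the fixed-buffer flag instead of being re-assigned on every issue.

/*
 * Illustrative sketch only, not kernel code: pick the import callback
 * once at prep time, then reuse it on every issue.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct fake_skb;        /* stand-in for struct sk_buff */
struct fake_iter;       /* stand-in for struct iov_iter */

typedef int (*sg_from_iter_fn)(struct fake_skb *, struct fake_iter *, size_t);

static int sg_from_fixed(struct fake_skb *skb, struct fake_iter *from, size_t len)
{
        printf("import %zu bytes from a registered (fixed) buffer\n", len);
        return 0;
}

static int sg_from_iovec(struct fake_skb *skb, struct fake_iter *from, size_t len)
{
        printf("import %zu bytes from a plain iovec\n", len);
        return 0;
}

struct fake_req {
        sg_from_iter_fn sg_from_iter;   /* chosen once at prep, reused at issue */
};

static void fake_prep(struct fake_req *req, bool fixed_buf)
{
        /* analogue of what io_send_zc_prep() does after this patch */
        req->sg_from_iter = fixed_buf ? sg_from_fixed : sg_from_iovec;
}

static int fake_issue(struct fake_req *req, size_t len)
{
        /* the issue path just calls whatever prep selected */
        return req->sg_from_iter(NULL, NULL, len);
}

int main(void)
{
        struct fake_req req;

        fake_prep(&req, true);
        fake_issue(&req, 4096);
        fake_prep(&req, false);
        fake_issue(&req, 512);
        return 0;
}

The diff below applies the same pattern to the real code: io_send_zc_prep() now picks iomsg->msg.sg_from_iter based on IORING_RECVSEND_FIXED_BUF, which is what allows the per-issue assignments in io_send_zc_import() and io_sendmsg_zc() to be dropped.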

io_uring/net.c

@@ -97,6 +97,11 @@ struct io_recvzc {
 	struct io_zcrx_ifq		*ifq;
 };
 
+static int io_sg_from_iter_iovec(struct sk_buff *skb,
+				 struct iov_iter *from, size_t length);
+static int io_sg_from_iter(struct sk_buff *skb,
+			   struct iov_iter *from, size_t length);
+
 int io_shutdown_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_shutdown *shutdown = io_kiocb_to_cmd(req, struct io_shutdown);
@@ -1266,6 +1271,7 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 {
 	struct io_sr_msg *zc = io_kiocb_to_cmd(req, struct io_sr_msg);
 	struct io_ring_ctx *ctx = req->ctx;
+	struct io_async_msghdr *iomsg;
 	struct io_kiocb *notif;
 	int ret;
 
@@ -1308,8 +1314,15 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	if (io_is_compat(req->ctx))
 		zc->msg_flags |= MSG_CMSG_COMPAT;
 
-	if (unlikely(!io_msg_alloc_async(req)))
+	iomsg = io_msg_alloc_async(req);
+	if (unlikely(!iomsg))
 		return -ENOMEM;
+
+	if (zc->flags & IORING_RECVSEND_FIXED_BUF)
+		iomsg->msg.sg_from_iter = io_sg_from_iter;
+	else
+		iomsg->msg.sg_from_iter = io_sg_from_iter_iovec;
+
 	if (req->opcode == IORING_OP_SEND_ZC) {
 		req->flags |= REQ_F_IMPORT_BUFFER;
 		return io_send_setup(req, sqe);
@@ -1320,11 +1333,8 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
 	if (unlikely(ret))
 		return ret;
 
-	if (!(zc->flags & IORING_RECVSEND_FIXED_BUF)) {
-		struct io_async_msghdr *iomsg = req->async_data;
-
+	if (!(zc->flags & IORING_RECVSEND_FIXED_BUF))
 		return io_notif_account_mem(zc->notif, iomsg->msg.msg_iter.count);
-	}
 	return 0;
 }
 
@@ -1391,7 +1401,6 @@ static int io_send_zc_import(struct io_kiocb *req, unsigned int issue_flags)
 					ITER_SOURCE, issue_flags);
 		if (unlikely(ret))
 			return ret;
-		kmsg->msg.sg_from_iter = io_sg_from_iter;
 	} else {
 		ret = import_ubuf(ITER_SOURCE, sr->buf, sr->len, &kmsg->msg.msg_iter);
 		if (unlikely(ret))
@@ -1399,7 +1408,6 @@ static int io_send_zc_import(struct io_kiocb *req, unsigned int issue_flags)
 		ret = io_notif_account_mem(sr->notif, sr->len);
 		if (unlikely(ret))
 			return ret;
-		kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
 	}
 
 	return ret;
@@ -1483,8 +1491,6 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
 	unsigned flags;
 	int ret, min_ret = 0;
 
-	kmsg->msg.sg_from_iter = io_sg_from_iter_iovec;
-
 	if (req->flags & REQ_F_IMPORT_BUFFER) {
 		unsigned uvec_segs = kmsg->msg.msg_iter.nr_segs;
 		int ret;
@@ -1493,7 +1499,6 @@ int io_sendmsg_zc(struct io_kiocb *req, unsigned int issue_flags)
 					   &kmsg->vec, uvec_segs, issue_flags);
 		if (unlikely(ret))
 			return ret;
-		kmsg->msg.sg_from_iter = io_sg_from_iter;
 		req->flags &= ~REQ_F_IMPORT_BUFFER;
 	}