io_uring/net: Allow to do vectorized send

At the moment you have to use sendmsg for vectorized send.
While this works, it's suboptimal, as it also means you need to
allocate a struct msghdr that must be kept alive until a
submission happens. We can remove this limitation by simply
allowing send to be used directly.

Signed-off-by: Norman Maurer <norman_maurer@apple.com>
Link: https://lore.kernel.org/r/20250729065952.26646-1-norman_maurer@apple.com
[axboe: remove -EINVAL return for SENDMSG and SEND_VECTORIZED]
[axboe: allow send_zc to set SEND_VECTORIZED too]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
pull/1340/head
Norman Maurer 2025-07-28 20:59:53 -10:00 committed by Jens Axboe
parent 4b290aae78
commit 6f02527729
2 changed files with 11 additions and 2 deletions

View File

@ -392,12 +392,16 @@ enum io_uring_op {
* the starting buffer ID in cqe->flags as per * the starting buffer ID in cqe->flags as per
* usual for provided buffer usage. The buffers * usual for provided buffer usage. The buffers
* will be contiguous from the starting buffer ID. * will be contiguous from the starting buffer ID.
*
* IORING_SEND_VECTORIZED If set, SEND[_ZC] will take a pointer to a io_vec
* to allow vectorized send operations.
*/ */
#define IORING_RECVSEND_POLL_FIRST (1U << 0) #define IORING_RECVSEND_POLL_FIRST (1U << 0)
#define IORING_RECV_MULTISHOT (1U << 1) #define IORING_RECV_MULTISHOT (1U << 1)
#define IORING_RECVSEND_FIXED_BUF (1U << 2) #define IORING_RECVSEND_FIXED_BUF (1U << 2)
#define IORING_SEND_ZC_REPORT_USAGE (1U << 3) #define IORING_SEND_ZC_REPORT_USAGE (1U << 3)
#define IORING_RECVSEND_BUNDLE (1U << 4) #define IORING_RECVSEND_BUNDLE (1U << 4)
#define IORING_SEND_VECTORIZED (1U << 5)
/* /*
* cqe.res for IORING_CQE_F_NOTIF if * cqe.res for IORING_CQE_F_NOTIF if

View File

@ -382,6 +382,10 @@ static int io_send_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe)
} }
if (req->flags & REQ_F_BUFFER_SELECT) if (req->flags & REQ_F_BUFFER_SELECT)
return 0; return 0;
if (sr->flags & IORING_SEND_VECTORIZED)
return io_net_import_vec(req, kmsg, sr->buf, sr->len, ITER_SOURCE);
return import_ubuf(ITER_SOURCE, sr->buf, sr->len, &kmsg->msg.msg_iter); return import_ubuf(ITER_SOURCE, sr->buf, sr->len, &kmsg->msg.msg_iter);
} }
@ -409,7 +413,7 @@ static int io_sendmsg_setup(struct io_kiocb *req, const struct io_uring_sqe *sqe
return io_net_import_vec(req, kmsg, msg.msg_iov, msg.msg_iovlen, ITER_SOURCE); return io_net_import_vec(req, kmsg, msg.msg_iov, msg.msg_iovlen, ITER_SOURCE);
} }
#define SENDMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_BUNDLE) #define SENDMSG_FLAGS (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_BUNDLE | IORING_SEND_VECTORIZED)
int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{ {
@ -1318,7 +1322,8 @@ void io_send_zc_cleanup(struct io_kiocb *req)
} }
#define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF) #define IO_ZC_FLAGS_COMMON (IORING_RECVSEND_POLL_FIRST | IORING_RECVSEND_FIXED_BUF)
#define IO_ZC_FLAGS_VALID (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE) #define IO_ZC_FLAGS_VALID (IO_ZC_FLAGS_COMMON | IORING_SEND_ZC_REPORT_USAGE | \
IORING_SEND_VECTORIZED)
int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{ {