io_uring/zcrx: add sync refill queue flushing
Add a zcrx interface via IORING_REGISTER_ZCRX_CTRL that forces the kernel
to flush / consume entries from the refill queue. Just as with the
IORING_REGISTER_ZCRX_REFILL attempt, the motivation is to address cases
where the refill queue becomes full and the user can't return buffers and
needs to stash them. It's still a slow path, and the user should size the
refill queue appropriately, but it should be helpful for handling temporary
traffic spikes and other unpredictable conditions.

The interface is simpler compared to ZCRX_REFILL as it doesn't need
temporary refill entry arrays and gives natural batching, whereas
ZCRX_REFILL requires even more user logic to be somewhat efficient.

Also, add a structure for the operation. It's not currently used but can
serve for future improvements like limiting the number of buffers to
process, etc.

Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent d663976dad
commit 475eb39b00
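For orientation, a rough userspace sketch of how the flush operation described
above might be invoked. It assumes the uapi added in the hunks below is
available from <linux/io_uring.h>, that IORING_REGISTER_ZCRX_CTRL takes a
single struct zcrx_ctrl with nr_args == 1, and that ring_fd / zcrx_id come
from earlier ring and zcrx area setup not shown here; the zcrx_flush_refill()
helper name is made up for illustration and is not part of this patch:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/io_uring.h>

/* Hypothetical helper: ask the kernel to drain the zcrx refill queue. */
static int zcrx_flush_refill(int ring_fd, __u32 zcrx_id)
{
	struct zcrx_ctrl ctrl;

	/*
	 * Zero everything; zcrx_flush_rq() below rejects the call with
	 * -EINVAL if ctrl.zc_flush.__resv is not all zeroes.
	 */
	memset(&ctrl, 0, sizeof(ctrl));
	ctrl.zcrx_id = zcrx_id;
	ctrl.op = ZCRX_CTRL_FLUSH_RQ;

	return syscall(__NR_io_uring_register, ring_fd,
		       IORING_REGISTER_ZCRX_CTRL, &ctrl, 1);
}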
@@ -1082,13 +1082,21 @@ struct io_uring_zcrx_ifq_reg {
 };
 
 enum zcrx_ctrl_op {
+	ZCRX_CTRL_FLUSH_RQ,
+
 	__ZCRX_CTRL_LAST,
 };
 
+struct zcrx_ctrl_flush_rq {
+	__u64		__resv[6];
+};
+
 struct zcrx_ctrl {
 	__u32		zcrx_id;
 	__u32		op; /* see enum zcrx_ctrl_op */
-	__u64		__resv[8];
+	__u64		__resv[2];
+
+	struct zcrx_ctrl_flush_rq	zc_flush;
 };
 
 #ifdef __cplusplus
@@ -941,6 +941,71 @@ static const struct memory_provider_ops io_uring_pp_zc_ops = {
 	.uninstall		= io_pp_uninstall,
 };
 
+static unsigned zcrx_parse_rq(netmem_ref *netmem_array, unsigned nr,
+			      struct io_zcrx_ifq *zcrx)
+{
+	unsigned int mask = zcrx->rq_entries - 1;
+	unsigned int i;
+
+	guard(spinlock_bh)(&zcrx->rq_lock);
+
+	nr = min(nr, io_zcrx_rqring_entries(zcrx));
+	for (i = 0; i < nr; i++) {
+		struct io_uring_zcrx_rqe *rqe = io_zcrx_get_rqe(zcrx, mask);
+		struct net_iov *niov;
+
+		if (!io_parse_rqe(rqe, zcrx, &niov))
+			break;
+		netmem_array[i] = net_iov_to_netmem(niov);
+	}
+
+	smp_store_release(&zcrx->rq_ring->head, zcrx->cached_rq_head);
+	return i;
+}
+
+#define ZCRX_FLUSH_BATCH	32
+
+static void zcrx_return_buffers(netmem_ref *netmems, unsigned nr)
+{
+	unsigned i;
+
+	for (i = 0; i < nr; i++) {
+		netmem_ref netmem = netmems[i];
+		struct net_iov *niov = netmem_to_net_iov(netmem);
+
+		if (!io_zcrx_put_niov_uref(niov))
+			continue;
+		if (!page_pool_unref_and_test(netmem))
+			continue;
+		io_zcrx_return_niov(niov);
+	}
+}
+
+static int zcrx_flush_rq(struct io_ring_ctx *ctx, struct io_zcrx_ifq *zcrx,
+			 struct zcrx_ctrl *ctrl)
+{
+	struct zcrx_ctrl_flush_rq *frq = &ctrl->zc_flush;
+	netmem_ref netmems[ZCRX_FLUSH_BATCH];
+	unsigned total = 0;
+	unsigned nr;
+
+	if (!mem_is_zero(&frq->__resv, sizeof(frq->__resv)))
+		return -EINVAL;
+
+	do {
+		nr = zcrx_parse_rq(netmems, ZCRX_FLUSH_BATCH, zcrx);
+
+		zcrx_return_buffers(netmems, nr);
+		total += nr;
+
+		if (fatal_signal_pending(current))
+			break;
+		cond_resched();
+	} while (nr == ZCRX_FLUSH_BATCH && total < zcrx->rq_entries);
+
+	return 0;
+}
+
 int io_zcrx_ctrl(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
 {
 	struct zcrx_ctrl ctrl;
@@ -956,10 +1021,13 @@ int io_zcrx_ctrl(struct io_ring_ctx *ctx, void __user *arg, unsigned nr_args)
 	zcrx = xa_load(&ctx->zcrx_ctxs, ctrl.zcrx_id);
 	if (!zcrx)
 		return -ENXIO;
-	if (ctrl.op >= __ZCRX_CTRL_LAST)
-		return -EOPNOTSUPP;
 
-	return -EINVAL;
+	switch (ctrl.op) {
+	case ZCRX_CTRL_FLUSH_RQ:
+		return zcrx_flush_rq(ctx, zcrx, &ctrl);
+	}
+
+	return -EOPNOTSUPP;
 }
 
 static bool io_zcrx_queue_cqe(struct io_kiocb *req, struct net_iov *niov,