xsk: Extend xsk_rcv_check validation

xsk_rcv_check tests inbound packets to see whether they match the
bound AF_XDP socket. Refactor the test into a small helper,
xsk_dev_queue_valid, and move the validation against xs->dev and
xs->queue_id there.

The fast-path case stays in place and allows for a quick return from
xsk_dev_queue_valid. If it fails, the validation is extended to check
whether the AF_XDP socket is bound against a leased queue, and if
that is the case, the test is redone against the leased queue's
device and queue id.
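As a concrete illustration of the two paths, here is a minimal
userspace model of the check (a sketch, not kernel code): the struct
layouts are stripped-down stand-ins for the kernel ones, the per-queue
lease pointer is modelled as a plain array, and model_dev_queue_valid
as well as the device names and queue ids are made up for this
example.

#include <stdbool.h>
#include <stdio.h>

/* Stripped-down stand-ins for the kernel structures involved. */
struct net_device;

struct netdev_rx_queue {
	struct net_device *dev;		/* device owning this rx queue */
	unsigned int index;		/* queue index on that device */
};

struct net_device {
	const char *name;
	unsigned int real_num_rx_queues;
	struct netdev_rx_queue *lease[8];	/* lease per rx queue, or NULL */
};

struct xdp_rxq_info {
	struct net_device *dev;
	unsigned int queue_index;
};

struct xdp_sock {
	struct net_device *dev;		/* device the socket is bound to */
	unsigned int queue_id;		/* queue the socket is bound to */
};

/* Mirrors the control flow of xsk_dev_queue_valid(). */
static bool model_dev_queue_valid(const struct xdp_sock *xs,
				  const struct xdp_rxq_info *info)
{
	const struct netdev_rx_queue *rxq;

	/* Fast path: packet arrived on the exact bound dev/queue. */
	if (info->dev == xs->dev && info->queue_index == xs->queue_id)
		return true;

	/* Slow path: the bound queue may carry a lease; redo the test
	 * against the leased queue's device and queue id.
	 */
	if (xs->queue_id < xs->dev->real_num_rx_queues) {
		rxq = xs->dev->lease[xs->queue_id];
		if (!rxq)
			return false;
		return info->dev == rxq->dev &&
		       info->queue_index == rxq->index;
	}
	return false;
}

int main(void)
{
	struct net_device devB = { "devB", 1, { NULL } };
	struct netdev_rx_queue lease = { &devB, 0 };	/* devA q3 -> devB q0 */
	struct net_device devA = { "devA", 8, { NULL } };
	struct xdp_sock xs = { &devA, 3 };		/* bound to devA q3 */
	struct xdp_rxq_info info = { &devB, 0 };	/* packet seen on devB q0 */

	devA.lease[3] = &lease;
	printf("valid: %d\n", model_dev_queue_valid(&xs, &info));	/* valid: 1 */
	return 0;
}

Run against the setup in main, the fast path fails (devB != devA),
but the lease lookup translates (devA, 3) into (devB, 0) and the
check passes.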

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Co-developed-by: David Wei <dw@davidwei.uk>
Signed-off-by: David Wei <dw@davidwei.uk>
Acked-by: Stanislav Fomichev <sdf@fomichev.me>
Reviewed-by: Nikolay Aleksandrov <razor@blackwall.org>
Link: https://patch.msgid.link/20260115082603.219152-8-daniel@iogearbox.net
Signed-off-by: Paolo Abeni <pabeni@redhat.com>

1 changed file with 26 additions and 3 deletions

@@ -324,14 +324,37 @@ static bool xsk_is_bound(struct xdp_sock *xs)
 	return false;
 }
 
+static bool xsk_dev_queue_valid(const struct xdp_sock *xs,
+				const struct xdp_rxq_info *info)
+{
+	struct net_device *dev = xs->dev;
+	u32 queue_index = xs->queue_id;
+	struct netdev_rx_queue *rxq;
+
+	if (info->dev == dev &&
+	    info->queue_index == queue_index)
+		return true;
+
+	if (queue_index < dev->real_num_rx_queues) {
+		rxq = READ_ONCE(__netif_get_rx_queue(dev, queue_index)->lease);
+		if (!rxq)
+			return false;
+
+		dev = rxq->dev;
+		queue_index = get_netdev_rx_queue_index(rxq);
+		return info->dev == dev &&
+		       info->queue_index == queue_index;
+	}
+	return false;
+}
+
 static int xsk_rcv_check(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
 {
 	if (!xsk_is_bound(xs))
 		return -ENXIO;
 
-	if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
+	if (!xsk_dev_queue_valid(xs, xdp->rxq))
 		return -EINVAL;
 
 	if (len > xsk_pool_get_rx_frame_size(xs->pool) && !xs->sg) {
 		xs->rx_dropped++;
 		return -ENOSPC;