@@ -15,6 +15,7 @@
 #include <net/sock.h>
 #include <net/tcp.h>
 #include <net/net_namespace.h>
+#include <net/page_pool.h>
 #include <linux/error-injection.h>
 #include <linux/smp.h>
 #include <linux/sock_diag.h>
@@ -53,10 +54,11 @@ static void bpf_test_timer_leave(struct bpf_test_timer *t)
 	rcu_read_unlock();
 }
 
-static bool bpf_test_timer_continue(struct bpf_test_timer *t, u32 repeat, int *err, u32 *duration)
+static bool bpf_test_timer_continue(struct bpf_test_timer *t, int iterations,
+				    u32 repeat, int *err, u32 *duration)
 	__must_hold(rcu)
 {
-	t->i++;
+	t->i += iterations;
 	if (t->i >= repeat) {
 		/* We're done. */
 		t->time_spent += ktime_get_ns() - t->time_start;
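With this change bpf_test_timer_continue() advances by a caller-supplied
iteration count, so "repeat" counts processed frames rather than loop passes
once a single pass can handle a whole batch. A toy model of the accounting,
illustrative only and not kernel code:

	struct toy_timer { unsigned int i; };

	/* One loop pass may now cover many frames, so advance by the
	 * per-pass frame count instead of by one.
	 */
	static int toy_continue(struct toy_timer *t, int iterations, unsigned int repeat)
	{
		t->i += iterations;	/* was: t->i++ */
		return t->i < repeat;	/* keep going until 'repeat' frames are done */
	}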
@@ -88,6 +90,286 @@ reset:
 	return false;
 }
 
+/* We put this struct at the head of each page with a context and frame
+ * initialised when the page is allocated, so we don't have to do this on each
+ * repetition of the test run.
+ */
+struct xdp_page_head {
+	struct xdp_buff orig_ctx;
+	struct xdp_buff ctx;
+	struct xdp_frame frm;
+	u8 data[];
+};
+
+struct xdp_test_data {
+	struct xdp_buff *orig_ctx;
+	struct xdp_rxq_info rxq;
+	struct net_device *dev;
+	struct page_pool *pp;
+	struct xdp_frame **frames;
+	struct sk_buff **skbs;
+	u32 batch_size;
+	u32 frame_cnt;
+};
+
+#define TEST_XDP_FRAME_SIZE (PAGE_SIZE - sizeof(struct xdp_page_head) \
+			     - sizeof(struct skb_shared_info))
+#define TEST_XDP_MAX_BATCH 256
+
+static void xdp_test_run_init_page(struct page *page, void *arg)
+{
+	struct xdp_page_head *head = phys_to_virt(page_to_phys(page));
+	struct xdp_buff *new_ctx, *orig_ctx;
+	u32 headroom = XDP_PACKET_HEADROOM;
+	struct xdp_test_data *xdp = arg;
+	size_t frm_len, meta_len;
+	struct xdp_frame *frm;
+	void *data;
+
+	orig_ctx = xdp->orig_ctx;
+	frm_len = orig_ctx->data_end - orig_ctx->data_meta;
+	meta_len = orig_ctx->data - orig_ctx->data_meta;
+	headroom -= meta_len;
+
+	new_ctx = &head->ctx;
+	frm = &head->frm;
+	data = &head->data;
+	memcpy(data + headroom, orig_ctx->data_meta, frm_len);
+
+	xdp_init_buff(new_ctx, TEST_XDP_FRAME_SIZE, &xdp->rxq);
+	xdp_prepare_buff(new_ctx, data, headroom, frm_len, true);
+	new_ctx->data = new_ctx->data_meta + meta_len;
+
+	xdp_update_frame_from_buff(new_ctx, frm);
+	frm->mem = new_ctx->rxq->mem;
+
+	memcpy(&head->orig_ctx, new_ctx, sizeof(head->orig_ctx));
+}
+
+static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx)
+{
+	struct xdp_mem_info mem = {};
+	struct page_pool *pp;
+	int err = -ENOMEM;
+	struct page_pool_params pp_params = {
+		.order = 0,
+		.flags = 0,
+		.pool_size = xdp->batch_size,
+		.nid = NUMA_NO_NODE,
+		.max_len = TEST_XDP_FRAME_SIZE,
+		.init_callback = xdp_test_run_init_page,
+		.init_arg = xdp,
+	};
+
+	xdp->frames = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
+	if (!xdp->frames)
+		return -ENOMEM;
+
+	xdp->skbs = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
+	if (!xdp->skbs)
+		goto err_skbs;
+
+	pp = page_pool_create(&pp_params);
+	if (IS_ERR(pp)) {
+		err = PTR_ERR(pp);
+		goto err_pp;
+	}
+
+	/* will copy 'mem.id' into pp->xdp_mem_id */
+	err = xdp_reg_mem_model(&mem, MEM_TYPE_PAGE_POOL, pp);
+	if (err)
+		goto err_mmodel;
+
+	xdp->pp = pp;
+
+	/* We create a 'fake' RXQ referencing the original dev, but with an
+	 * xdp_mem_info pointing to our page_pool
+	 */
+	xdp_rxq_info_reg(&xdp->rxq, orig_ctx->rxq->dev, 0, 0);
+	xdp->rxq.mem.type = MEM_TYPE_PAGE_POOL;
+	xdp->rxq.mem.id = pp->xdp_mem_id;
+	xdp->dev = orig_ctx->rxq->dev;
+	xdp->orig_ctx = orig_ctx;
+
+	return 0;
+
+err_mmodel:
+	page_pool_destroy(pp);
+err_pp:
+	kvfree(xdp->skbs);
+err_skbs:
+	kvfree(xdp->frames);
+	return err;
+}
+
+static void xdp_test_run_teardown(struct xdp_test_data *xdp)
+{
+	page_pool_destroy(xdp->pp);
+	kvfree(xdp->frames);
+	kvfree(xdp->skbs);
+}
+
+static bool ctx_was_changed(struct xdp_page_head *head)
+{
+	return head->orig_ctx.data != head->ctx.data ||
+		head->orig_ctx.data_meta != head->ctx.data_meta ||
+		head->orig_ctx.data_end != head->ctx.data_end;
+}
+
+static void reset_ctx(struct xdp_page_head *head)
+{
+	if (likely(!ctx_was_changed(head)))
+		return;
+
+	head->ctx.data = head->orig_ctx.data;
+	head->ctx.data_meta = head->orig_ctx.data_meta;
+	head->ctx.data_end = head->orig_ctx.data_end;
+	xdp_update_frame_from_buff(&head->ctx, &head->frm);
+}
+
+static int xdp_recv_frames(struct xdp_frame **frames, int nframes,
+			   struct sk_buff **skbs,
+			   struct net_device *dev)
+{
+	gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
+	int i, n;
+	LIST_HEAD(list);
+
+	n = kmem_cache_alloc_bulk(skbuff_head_cache, gfp, nframes, (void **)skbs);
+	if (unlikely(n == 0)) {
+		for (i = 0; i < nframes; i++)
+			xdp_return_frame(frames[i]);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < nframes; i++) {
+		struct xdp_frame *xdpf = frames[i];
+		struct sk_buff *skb = skbs[i];
+
+		skb = __xdp_build_skb_from_frame(xdpf, skb, dev);
+		if (!skb) {
+			xdp_return_frame(xdpf);
+			continue;
+		}
+
+		list_add_tail(&skb->list, &list);
+	}
+	netif_receive_skb_list(&list);
+
+	return 0;
+}
+
+static int xdp_test_run_batch(struct xdp_test_data *xdp, struct bpf_prog *prog,
+			      u32 repeat)
+{
+	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+	int err = 0, act, ret, i, nframes = 0, batch_sz;
+	struct xdp_frame **frames = xdp->frames;
+	struct xdp_page_head *head;
+	struct xdp_frame *frm;
+	bool redirect = false;
+	struct xdp_buff *ctx;
+	struct page *page;
+
+	batch_sz = min_t(u32, repeat, xdp->batch_size);
+
+	local_bh_disable();
+	xdp_set_return_frame_no_direct();
+
+	for (i = 0; i < batch_sz; i++) {
+		page = page_pool_dev_alloc_pages(xdp->pp);
+		if (!page) {
+			err = -ENOMEM;
+			goto out;
+		}
+
+		head = phys_to_virt(page_to_phys(page));
+		reset_ctx(head);
+		ctx = &head->ctx;
+		frm = &head->frm;
+		xdp->frame_cnt++;
+
+		act = bpf_prog_run_xdp(prog, ctx);
+
+		/* if program changed pkt bounds we need to update the xdp_frame */
+		if (unlikely(ctx_was_changed(head))) {
+			ret = xdp_update_frame_from_buff(ctx, frm);
+			if (ret) {
+				xdp_return_buff(ctx);
+				continue;
+			}
+		}
+
+		switch (act) {
+		case XDP_TX:
+			/* we can't do a real XDP_TX since we're not in the
+			 * driver, so turn it into a REDIRECT back to the same
+			 * index
+			 */
+			ri->tgt_index = xdp->dev->ifindex;
+			ri->map_id = INT_MAX;
+			ri->map_type = BPF_MAP_TYPE_UNSPEC;
+			fallthrough;
+		case XDP_REDIRECT:
+			redirect = true;
+			ret = xdp_do_redirect_frame(xdp->dev, ctx, frm, prog);
+			if (ret)
+				xdp_return_buff(ctx);
+			break;
+		case XDP_PASS:
+			frames[nframes++] = frm;
+			break;
+		default:
+			bpf_warn_invalid_xdp_action(NULL, prog, act);
+			fallthrough;
+		case XDP_DROP:
+			xdp_return_buff(ctx);
+			break;
+		}
+	}
+
+out:
+	if (redirect)
+		xdp_do_flush();
+	if (nframes) {
+		ret = xdp_recv_frames(frames, nframes, xdp->skbs, xdp->dev);
+		if (ret)
+			err = ret;
+	}
+
+	xdp_clear_return_frame_no_direct();
+	local_bh_enable();
+	return err;
+}
+
+static int bpf_test_run_xdp_live(struct bpf_prog *prog, struct xdp_buff *ctx,
+				 u32 repeat, u32 batch_size, u32 *time)
+
+{
+	struct xdp_test_data xdp = { .batch_size = batch_size };
+	struct bpf_test_timer t = { .mode = NO_MIGRATE };
+	int ret;
+
+	if (!repeat)
+		repeat = 1;
+
+	ret = xdp_test_run_setup(&xdp, ctx);
+	if (ret)
+		return ret;
+
+	bpf_test_timer_enter(&t);
+	do {
+		xdp.frame_cnt = 0;
+		ret = xdp_test_run_batch(&xdp, prog, repeat - t.i);
+		if (unlikely(ret < 0))
+			break;
+	} while (bpf_test_timer_continue(&t, xdp.frame_cnt, repeat, &ret, time));
+	bpf_test_timer_leave(&t);
+
+	xdp_test_run_teardown(&xdp);
+	return ret;
+}
+
 static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
 			u32 *retval, u32 *time, bool xdp)
 {
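Taken together, the block above allocates a page pool whose pages are
pre-initialised with an xdp_buff/xdp_frame pair, runs the program on batches
of those pages, and feeds PASSed or REDIRECTed frames into the live data
path. A minimal sketch of how a selftest might drive this from userspace via
libbpf, assuming the batch_size field added to struct bpf_test_run_opts
elsewhere in this series (the helper name and values here are illustrative):

	#include <bpf/bpf.h>
	#include <linux/bpf.h>

	/* Run an XDP prog in live-frames mode, injecting 'repeat' copies
	 * of the packet in batches of 64 pages.
	 */
	static int run_xdp_live(int prog_fd, void *pkt, size_t pkt_len, int repeat)
	{
		LIBBPF_OPTS(bpf_test_run_opts, opts,
			.data_in = pkt,
			.data_size_in = pkt_len,
			.repeat = repeat,
			.flags = BPF_F_TEST_XDP_LIVE_FRAMES,
			.batch_size = 64,	/* 0 falls back to NAPI_POLL_WEIGHT */
		);

		/* data_out/ctx_out must be unset in this mode; frames the
		 * program passes or redirects are processed as if received
		 * on the underlying interface.
		 */
		return bpf_prog_test_run_opts(prog_fd, &opts);
	}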
@@ -119,7 +401,7 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
 			*retval = bpf_prog_run_xdp(prog, ctx);
 		else
 			*retval = bpf_prog_run(prog, ctx);
-	} while (bpf_test_timer_continue(&t, repeat, &ret, time));
+	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, time));
 	bpf_reset_run_ctx(old_ctx);
 	bpf_test_timer_leave(&t);
 
@@ -446,7 +728,7 @@ int bpf_prog_test_run_tracing(struct bpf_prog *prog,
 	int b = 2, err = -EFAULT;
 	u32 retval = 0;
 
-	if (kattr->test.flags || kattr->test.cpu)
+	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
 		return -EINVAL;
 
 	switch (prog->expected_attach_type) {
@@ -510,7 +792,7 @@ int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
 	/* doesn't support data_in/out, ctx_out, duration, or repeat */
 	if (kattr->test.data_in || kattr->test.data_out ||
 	    kattr->test.ctx_out || kattr->test.duration ||
-	    kattr->test.repeat)
+	    kattr->test.repeat || kattr->test.batch_size)
 		return -EINVAL;
 
 	if (ctx_size_in < prog->aux->max_ctx_offset ||
@@ -741,7 +1023,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
 	void *data;
 	int ret;
 
-	if (kattr->test.flags || kattr->test.cpu)
+	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
 		return -EINVAL;
 
 	data = bpf_test_init(kattr, kattr->test.data_size_in,
@@ -922,7 +1204,9 @@ static void xdp_convert_buff_to_md(struct xdp_buff *xdp, struct xdp_md *xdp_md)
 int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
 			  union bpf_attr __user *uattr)
 {
+	bool do_live = (kattr->test.flags & BPF_F_TEST_XDP_LIVE_FRAMES);
 	u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	u32 batch_size = kattr->test.batch_size;
 	u32 size = kattr->test.data_size_in;
 	u32 headroom = XDP_PACKET_HEADROOM;
 	u32 retval, duration, max_data_sz;
@@ -938,6 +1222,18 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
 	    prog->expected_attach_type == BPF_XDP_CPUMAP)
 		return -EINVAL;
 
+	if (kattr->test.flags & ~BPF_F_TEST_XDP_LIVE_FRAMES)
+		return -EINVAL;
+
+	if (do_live) {
+		if (!batch_size)
+			batch_size = NAPI_POLL_WEIGHT;
+		else if (batch_size > TEST_XDP_MAX_BATCH)
+			return -E2BIG;
+	} else if (batch_size) {
+		return -EINVAL;
+	}
+
 	ctx = bpf_ctx_init(kattr, sizeof(struct xdp_md));
 	if (IS_ERR(ctx))
 		return PTR_ERR(ctx);
@@ -946,14 +1242,20 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
 		/* There can't be user provided data before the meta data */
 		if (ctx->data_meta || ctx->data_end != size ||
 		    ctx->data > ctx->data_end ||
-		    unlikely(xdp_metalen_invalid(ctx->data)))
+		    unlikely(xdp_metalen_invalid(ctx->data)) ||
+		    (do_live && (kattr->test.data_out || kattr->test.ctx_out)))
 			goto free_ctx;
 		/* Meta data is allocated from the headroom */
 		headroom -= ctx->data;
 	}
 
 	max_data_sz = 4096 - headroom - tailroom;
-	size = min_t(u32, size, max_data_sz);
+	if (size > max_data_sz) {
+		/* disallow live data mode for jumbo frames */
+		if (do_live)
+			goto free_ctx;
+		size = max_data_sz;
+	}
 
 	data = bpf_test_init(kattr, size, max_data_sz, headroom, tailroom);
 	if (IS_ERR(data)) {
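To make the size budget above concrete: with the usual 256-byte
XDP_PACKET_HEADROOM and the skb_shared_info tailroom, live mode caps test
packets at roughly 3.5 KiB and now rejects larger inputs instead of silently
clamping them. A back-of-envelope sketch (the tailroom value is an assumed
x86_64 figure, not taken from this patch):

	#include <stdio.h>

	#define XDP_PACKET_HEADROOM	256
	#define ASSUMED_TAILROOM	320	/* ~SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) */

	int main(void)
	{
		unsigned int max_data_sz = 4096 - XDP_PACKET_HEADROOM - ASSUMED_TAILROOM;

		/* Live mode errors out for larger inputs (goto free_ctx)
		 * rather than truncating to max_data_sz.
		 */
		printf("max XDP test payload: %u bytes\n", max_data_sz);
		return 0;
	}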
@@ -1011,6 +1313,9 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
 	if (repeat > 1)
 		bpf_prog_change_xdp(NULL, prog);
 
-	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
+	if (do_live)
+		ret = bpf_test_run_xdp_live(prog, &xdp, repeat, batch_size, &duration);
+	else
+		ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
 	/* We convert the xdp_buff back to an xdp_md before checking the return
 	 * code so the reference count of any held netdevice will be decremented
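The resulting attr validation semantics, continuing the hypothetical
userspace helper style from the earlier sketch: batch_size is only legal
together with the live flag, defaults to NAPI_POLL_WEIGHT when zero, and is
capped at TEST_XDP_MAX_BATCH (256):

	#include <bpf/bpf.h>
	#include <linux/bpf.h>

	/* Illustrative expectations, not a real selftest:
	 *  - batch_size set without BPF_F_TEST_XDP_LIVE_FRAMES -> -EINVAL
	 *  - batch_size > TEST_XDP_MAX_BATCH (256) in live mode -> -E2BIG
	 *  - batch_size == 0 in live mode -> kernel picks NAPI_POLL_WEIGHT
	 */
	static int expect_einval_without_live_flag(int prog_fd, void *pkt, size_t len)
	{
		LIBBPF_OPTS(bpf_test_run_opts, opts,
			.data_in = pkt,
			.data_size_in = len,
			.batch_size = 32,	/* nonzero, but live flag not set */
		);

		return bpf_prog_test_run_opts(prog_fd, &opts);	/* expect -EINVAL */
	}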
@@ -1073,7 +1378,7 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
 	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
 		return -EINVAL;
 
-	if (kattr->test.flags || kattr->test.cpu)
+	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
 		return -EINVAL;
 
 	if (size < ETH_HLEN)
@@ -1108,7 +1413,7 @@ int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
 	do {
 		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
 					  size, flags);
-	} while (bpf_test_timer_continue(&t, repeat, &ret, &duration));
+	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
 	bpf_test_timer_leave(&t);
 
 	if (ret < 0)
@@ -1140,7 +1445,7 @@ int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kat
 	if (prog->type != BPF_PROG_TYPE_SK_LOOKUP)
 		return -EINVAL;
 
-	if (kattr->test.flags || kattr->test.cpu)
+	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
 		return -EINVAL;
 
 	if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out ||
@@ -1203,7 +1508,7 @@ int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kat
 	do {
 		ctx.selected_sk = NULL;
 		retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, bpf_prog_run);
-	} while (bpf_test_timer_continue(&t, repeat, &ret, &duration));
+	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
 	bpf_test_timer_leave(&t);
 
 	if (ret < 0)
@@ -1242,7 +1547,8 @@ int bpf_prog_test_run_syscall(struct bpf_prog *prog,
 	/* doesn't support data_in/out, ctx_out, duration, or repeat or flags */
 	if (kattr->test.data_in || kattr->test.data_out ||
 	    kattr->test.ctx_out || kattr->test.duration ||
-	    kattr->test.repeat || kattr->test.flags)
+	    kattr->test.repeat || kattr->test.flags ||
+	    kattr->test.batch_size)
 		return -EINVAL;
 
 	if (ctx_size_in < prog->aux->max_ctx_offset ||