@@ -4386,6 +4386,7 @@ static bool is_spillable_regtype(enum bpf_reg_type type)
 	case PTR_TO_MEM:
 	case PTR_TO_FUNC:
 	case PTR_TO_MAP_KEY:
+	case PTR_TO_ARENA:
 		return true;
 	default:
 		return false;
@@ -5828,6 +5829,8 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
 	case PTR_TO_XDP_SOCK:
 		pointer_desc = "xdp_sock ";
 		break;
+	case PTR_TO_ARENA:
+		return 0;
 	default:
 		break;
 	}
@@ -6937,6 +6940,9 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
 
 		if (!err && value_regno >= 0 && (rdonly_mem || t == BPF_READ))
 			mark_reg_unknown(env, regs, value_regno);
+	} else if (reg->type == PTR_TO_ARENA) {
+		if (t == BPF_READ && value_regno >= 0)
+			mark_reg_unknown(env, regs, value_regno);
 	} else {
 		verbose(env, "R%d invalid mem access '%s'\n", regno,
 			reg_type_str(env, reg->type));
@@ -8408,6 +8414,7 @@ static int check_func_arg_reg_off(struct bpf_verifier_env *env,
 	case PTR_TO_MEM | MEM_RINGBUF:
 	case PTR_TO_BUF:
 	case PTR_TO_BUF | MEM_RDONLY:
+	case PTR_TO_ARENA:
 	case SCALAR_VALUE:
 		return 0;
 	/* All the rest must be rejected, except PTR_TO_BTF_ID which allows
@@ -13852,6 +13859,21 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
 
 	dst_reg = &regs[insn->dst_reg];
 	src_reg = NULL;
+
+	if (dst_reg->type == PTR_TO_ARENA) {
+		struct bpf_insn_aux_data *aux = cur_aux(env);
+
+		if (BPF_CLASS(insn->code) == BPF_ALU64)
+			/*
+			 * 32-bit operations zero upper bits automatically.
+			 * 64-bit operations need to be converted to 32.
+			 */
+			aux->needs_zext = true;
+
+		/* Any arithmetic operations are allowed on arena pointers */
+		return 0;
+	}
+
 	if (dst_reg->type != SCALAR_VALUE)
 		ptr_reg = dst_reg;
 	else
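
Illustration (not part of the patch): the needs_zext flag set above feeds the opcode demotion in do_misc_fixups() further down, where a 64-bit ALU operation on an arena pointer is rewritten to its 32-bit form so the upper 32 bits stay zero. A minimal sketch, assuming the insn macros from include/linux/filter.h:

	struct bpf_insn before = BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2);	/* r1 += r2 */
	struct bpf_insn after  = BPF_ALU32_REG(BPF_ADD, BPF_REG_1, BPF_REG_2);	/* w1 += w2, upper 32 bits cleared */
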
@@ -13969,19 +13991,20 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
 	} else if (opcode == BPF_MOV) {
 
 		if (BPF_SRC(insn->code) == BPF_X) {
-			if (insn->imm != 0) {
+			if (BPF_CLASS(insn->code) == BPF_ALU) {
+				if ((insn->off != 0 && insn->off != 8 && insn->off != 16) ||
+				    insn->imm) {
 					verbose(env, "BPF_MOV uses reserved fields\n");
 					return -EINVAL;
 				}
-
-			if (BPF_CLASS(insn->code) == BPF_ALU) {
-				if (insn->off != 0 && insn->off != 8 && insn->off != 16) {
-					verbose(env, "BPF_MOV uses reserved fields\n");
-					return -EINVAL;
-				}
+			} else if (insn->off == BPF_ADDR_SPACE_CAST) {
+				if (insn->imm != 1 && insn->imm != 1u << 16) {
+					verbose(env, "addr_space_cast insn can only convert between address space 1 and 0\n");
+					return -EINVAL;
+				}
 			} else {
-				if (insn->off != 0 && insn->off != 8 && insn->off != 16 &&
-				    insn->off != 32) {
+				if ((insn->off != 0 && insn->off != 8 && insn->off != 16 &&
+				     insn->off != 32) || insn->imm) {
 					verbose(env, "BPF_MOV uses reserved fields\n");
 					return -EINVAL;
 				}
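
Illustration (not part of the patch): the addr_space_cast move validated above is a BPF_ALU64 | BPF_MOV | BPF_X instruction whose off field is BPF_ADDR_SPACE_CAST and whose imm packs (dst_as << 16) | src_as per the arena series' encoding, so only imm == 1 (as(1) -> as(0)) and imm == 1 << 16 (as(0) -> as(1)) pass the check. A minimal sketch of the raw encoding, assuming the uapi definitions from linux/bpf.h:

	struct bpf_insn cast_to_as0 = {
		.code    = BPF_ALU64 | BPF_MOV | BPF_X,
		.dst_reg = BPF_REG_1,
		.src_reg = BPF_REG_1,
		.off     = BPF_ADDR_SPACE_CAST,
		.imm     = 1,			/* dst_as = 0, src_as = 1 */
	};
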
@@ -14008,7 +14031,12 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
 			struct bpf_reg_state *dst_reg = regs + insn->dst_reg;
 
 			if (BPF_CLASS(insn->code) == BPF_ALU64) {
-				if (insn->off == 0) {
+				if (insn->imm) {
+					/* off == BPF_ADDR_SPACE_CAST */
+					mark_reg_unknown(env, regs, insn->dst_reg);
+					if (insn->imm == 1) /* cast from as(1) to as(0) */
+						dst_reg->type = PTR_TO_ARENA;
+				} else if (insn->off == 0) {
 					/* case: R1 = R2
 					 * copy register state to dest reg
 					 */
@@ -15182,6 +15210,10 @@ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
 
 	if (insn->src_reg == BPF_PSEUDO_MAP_VALUE ||
 	    insn->src_reg == BPF_PSEUDO_MAP_IDX_VALUE) {
+		if (map->map_type == BPF_MAP_TYPE_ARENA) {
+			__mark_reg_unknown(env, dst_reg);
+			return 0;
+		}
 		dst_reg->type = PTR_TO_MAP_VALUE;
 		dst_reg->off = aux->map_off;
 		WARN_ON_ONCE(map->max_entries != 1);
@@ -16568,6 +16600,8 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
 		 * the same stack frame, since fp-8 in foo != fp-8 in bar
 		 */
 		return regs_exact(rold, rcur, idmap) && rold->frameno == rcur->frameno;
+	case PTR_TO_ARENA:
+		return true;
 	default:
 		return regs_exact(rold, rcur, idmap);
 	}
@@ -17443,6 +17477,7 @@ static bool reg_type_mismatch_ok(enum bpf_reg_type type)
 	case PTR_TO_TCP_SOCK:
 	case PTR_TO_XDP_SOCK:
 	case PTR_TO_BTF_ID:
+	case PTR_TO_ARENA:
 		return false;
 	default:
 		return true;
@@ -18296,6 +18331,31 @@ static int resolve_pseudo_ldimm64(struct bpf_verifier_env *env)
 				fdput(f);
 				return -EBUSY;
 			}
+			if (map->map_type == BPF_MAP_TYPE_ARENA) {
+				if (env->prog->aux->arena) {
+					verbose(env, "Only one arena per program\n");
+					fdput(f);
+					return -EBUSY;
+				}
+				if (!env->allow_ptr_leaks || !env->bpf_capable) {
+					verbose(env, "CAP_BPF and CAP_PERFMON are required to use arena\n");
+					fdput(f);
+					return -EPERM;
+				}
+				if (!env->prog->jit_requested) {
+					verbose(env, "JIT is required to use arena\n");
+					return -EOPNOTSUPP;
+				}
+				if (!bpf_jit_supports_arena()) {
+					verbose(env, "JIT doesn't support arena\n");
+					return -EOPNOTSUPP;
+				}
+				env->prog->aux->arena = (void *)map;
+				if (!bpf_arena_get_user_vm_start(env->prog->aux->arena)) {
+					verbose(env, "arena's user address must be set via map_extra or mmap()\n");
+					return -EINVAL;
+				}
+			}
 
 			fdput(f);
 next_insn:
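
Illustration (not part of the patch): what the checks above expect from the program side. A minimal arena map definition in the style of the arena selftests; the __uint()/SEC() helpers come from bpf_helpers.h, not this patch, and the user VM address is fixed via map_extra or an mmap() before load. Only one such map may be referenced by a program.

	struct {
		__uint(type, BPF_MAP_TYPE_ARENA);
		__uint(map_flags, BPF_F_MMAPABLE);
		__uint(max_entries, 100);	/* arena size, in pages */
	} arena SEC(".maps");
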
@@ -18917,6 +18977,14 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 				env->prog->aux->num_exentries++;
 			}
 			continue;
+		case PTR_TO_ARENA:
+			if (BPF_MODE(insn->code) == BPF_MEMSX) {
+				verbose(env, "sign extending loads from arena are not supported yet\n");
+				return -EOPNOTSUPP;
+			}
+			insn->code = BPF_CLASS(insn->code) | BPF_PROBE_MEM32 | BPF_SIZE(insn->code);
+			env->prog->aux->num_exentries++;
+			continue;
 		default:
 			continue;
 		}
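
Illustration (not part of the patch): the PTR_TO_ARENA case above rewrites a load or store through an arena pointer into its BPF_PROBE_MEM32 form, which a supporting JIT lowers to an access relative to the arena's kernel base with an exception-table entry (hence the num_exentries bump). A sketch of the before/after encoding, assuming BPF_LDX_MEM() and the kernel-internal BPF_PROBE_MEM32 from include/linux/filter.h:

	struct bpf_insn before = BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0);	/* r0 = *(u32 *)(r1 + 0) */
	struct bpf_insn after  = before;

	after.code = BPF_LDX | BPF_PROBE_MEM32 | BPF_W;	/* same size and regs, arena-aware mode */
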
@@ -19102,13 +19170,19 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 		func[i]->aux->nr_linfo = prog->aux->nr_linfo;
 		func[i]->aux->jited_linfo = prog->aux->jited_linfo;
 		func[i]->aux->linfo_idx = env->subprog_info[i].linfo_idx;
+		func[i]->aux->arena = prog->aux->arena;
 		num_exentries = 0;
 		insn = func[i]->insnsi;
 		for (j = 0; j < func[i]->len; j++, insn++) {
 			if (BPF_CLASS(insn->code) == BPF_LDX &&
 			    (BPF_MODE(insn->code) == BPF_PROBE_MEM ||
+			     BPF_MODE(insn->code) == BPF_PROBE_MEM32 ||
 			     BPF_MODE(insn->code) == BPF_PROBE_MEMSX))
 				num_exentries++;
+			if ((BPF_CLASS(insn->code) == BPF_STX ||
+			     BPF_CLASS(insn->code) == BPF_ST) &&
+			     BPF_MODE(insn->code) == BPF_PROBE_MEM32)
+				num_exentries++;
 		}
 		func[i]->aux->num_exentries = num_exentries;
 		func[i]->aux->tail_call_reachable = env->subprog_info[i].tail_call_reachable;
@@ -19507,6 +19581,21 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
 	}
 
 	for (i = 0; i < insn_cnt;) {
+		if (insn->code == (BPF_ALU64 | BPF_MOV | BPF_X) && insn->imm) {
+			if ((insn->off == BPF_ADDR_SPACE_CAST && insn->imm == 1) ||
+			    (((struct bpf_map *)env->prog->aux->arena)->map_flags & BPF_F_NO_USER_CONV)) {
+				/* convert to 32-bit mov that clears upper 32-bit */
+				insn->code = BPF_ALU | BPF_MOV | BPF_X;
+				/* clear off, so it's a normal 'wX = wY' from JIT pov */
+				insn->off = 0;
+			} /* cast from as(0) to as(1) should be handled by JIT */
+			goto next_insn;
+		}
+
+		if (env->insn_aux_data[i + delta].needs_zext)
+			/* Convert BPF_CLASS(insn->code) == BPF_ALU64 to 32-bit ALU */
+			insn->code = BPF_ALU | BPF_OP(insn->code) | BPF_SRC(insn->code);
+
 		/* Make divide-by-zero exceptions impossible. */
 		if (insn->code == (BPF_ALU64 | BPF_MOD | BPF_X) ||
 		    insn->code == (BPF_ALU64 | BPF_DIV | BPF_X) ||
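
Illustration (not part of the patch): effect of the fixup above on a cast to address space 0 (imm == 1). Only code and off are touched, so from the JIT's point of view the instruction becomes a plain 32-bit register move that zeroes the upper bits; the leftover imm is not interpreted further for a 32-bit BPF_MOV | BPF_X. A sketch, assuming the uapi insn definitions:

	struct bpf_insn insn = {
		.code    = BPF_ALU64 | BPF_MOV | BPF_X,	/* r1 = addr_space_cast(r2, 0, 1) */
		.dst_reg = BPF_REG_1,
		.src_reg = BPF_REG_2,
		.off     = BPF_ADDR_SPACE_CAST,
		.imm     = 1,
	};

	insn.code = BPF_ALU | BPF_MOV | BPF_X;	/* after fixup: behaves as 'w1 = w2' */
	insn.off  = 0;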