Merge branch 'bpf-control-flow-graph-and-precision-backtrack-fixes'
Andrii Nakryiko says:

====================
BPF control flow graph and precision backtrack fixes

A small fix to the BPF verifier's CFG logic around handling and reporting ldimm64
instructions. Patch #1 was previously submitted separately ([0]), so this
patch set supersedes that patch.

The second patch fixes an obscure corner case in mark_chain_precision() logic;
see that patch for details. Patch #3 adds a dedicated test, however fragile it
might be.

  [0] https://patchwork.kernel.org/project/netdevbpf/patch/[email protected]/
====================

Link: https://lore.kernel.org/r/[email protected]
Signed-off-by: Alexei Starovoitov <[email protected]>
Alexei Starovoitov committed Nov 10, 2023
2 parents fe69a1b + 62ccdb1 commit 8c74b27
Showing 4 changed files with 89 additions and 15 deletions.
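For context: BPF_LD | BPF_IMM | BPF_DW ("ldimm64") is the only BPF instruction
that occupies two consecutive 8-byte instruction slots, so the CFG walk has to
step over both halves and reject any edge that lands on the second half. Below
is a minimal sketch of the rejected shape, modeled on the ld_imm64.c selftests
updated in this series; the array name and framing are illustrative and not
part of the patch, and the insn macros come from include/linux/filter.h.

        /* insn 0 jumps forward by 1, i.e. to insn 2, which is the second
         * (upper 32 bits) half of the ldimm64 occupying insns 1 and 2.
         * With this series, check_cfg() rejects the program with
         * "jump into the middle of ldimm64 insn 1".
         */
        struct bpf_insn jump_into_ldimm64[] = {
                BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),  /* insn 0 */
                BPF_LD_IMM64(BPF_REG_0, 1),             /* insns 1-2 */
                BPF_MOV64_IMM(BPF_REG_0, 2),            /* insn 3 */
                BPF_EXIT_INSN(),                        /* insn 4 */
        };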
8 changes: 6 additions & 2 deletions include/linux/bpf.h
@@ -909,10 +909,14 @@ bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size)
         aux->ctx_field_size = size;
 }
 
+static inline bool bpf_is_ldimm64(const struct bpf_insn *insn)
+{
+        return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
+}
+
 static inline bool bpf_pseudo_func(const struct bpf_insn *insn)
 {
-        return insn->code == (BPF_LD | BPF_IMM | BPF_DW) &&
-               insn->src_reg == BPF_PSEUDO_FUNC;
+        return bpf_is_ldimm64(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
 }
 
 struct bpf_prog_ops {
48 changes: 39 additions & 9 deletions kernel/bpf/verifier.c
@@ -3516,12 +3516,29 @@ static int push_jmp_history(struct bpf_verifier_env *env,
 
 /* Backtrack one insn at a time. If idx is not at the top of recorded
  * history then previous instruction came from straight line execution.
+ * Return -ENOENT if we exhausted all instructions within given state.
+ *
+ * It's legal to have a bit of a looping with the same starting and ending
+ * insn index within the same state, e.g.: 3->4->5->3, so just because current
+ * instruction index is the same as state's first_idx doesn't mean we are
+ * done. If there is still some jump history left, we should keep going. We
+ * need to take into account that we might have a jump history between given
+ * state's parent and itself, due to checkpointing. In this case, we'll have
+ * history entry recording a jump from last instruction of parent state and
+ * first instruction of given state.
  */
 static int get_prev_insn_idx(struct bpf_verifier_state *st, int i,
                              u32 *history)
 {
         u32 cnt = *history;
 
+        if (i == st->first_insn_idx) {
+                if (cnt == 0)
+                        return -ENOENT;
+                if (cnt == 1 && st->jmp_history[0].idx == i)
+                        return -ENOENT;
+        }
+
         if (cnt && st->jmp_history[cnt - 1].idx == i) {
                 i = st->jmp_history[cnt - 1].prev_idx;
                 (*history)--;
@@ -4401,10 +4418,10 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno)
                          * Nothing to be tracked further in the parent state.
                          */
                         return 0;
-                if (i == first_idx)
-                        break;
                 subseq_idx = i;
                 i = get_prev_insn_idx(st, i, &history);
+                if (i == -ENOENT)
+                        break;
                 if (i >= env->prog->len) {
                         /* This can happen if backtracking reached insn 0
                          * and there are still reg_mask or stack_mask
@@ -15439,15 +15456,16 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns,
                                 struct bpf_verifier_env *env,
                                 bool visit_callee)
 {
-        int ret;
+        int ret, insn_sz;
 
-        ret = push_insn(t, t + 1, FALLTHROUGH, env, false);
+        insn_sz = bpf_is_ldimm64(&insns[t]) ? 2 : 1;
+        ret = push_insn(t, t + insn_sz, FALLTHROUGH, env, false);
         if (ret)
                 return ret;
 
-        mark_prune_point(env, t + 1);
+        mark_prune_point(env, t + insn_sz);
         /* when we exit from subprog, we need to record non-linear history */
-        mark_jmp_point(env, t + 1);
+        mark_jmp_point(env, t + insn_sz);
 
         if (visit_callee) {
                 mark_prune_point(env, t);
@@ -15469,15 +15487,17 @@ static int visit_func_call_insn(int t, struct bpf_insn *insns,
 static int visit_insn(int t, struct bpf_verifier_env *env)
 {
         struct bpf_insn *insns = env->prog->insnsi, *insn = &insns[t];
-        int ret, off;
+        int ret, off, insn_sz;
 
         if (bpf_pseudo_func(insn))
                 return visit_func_call_insn(t, insns, env, true);
 
         /* All non-branch instructions have a single fall-through edge. */
         if (BPF_CLASS(insn->code) != BPF_JMP &&
-            BPF_CLASS(insn->code) != BPF_JMP32)
-                return push_insn(t, t + 1, FALLTHROUGH, env, false);
+            BPF_CLASS(insn->code) != BPF_JMP32) {
+                insn_sz = bpf_is_ldimm64(insn) ? 2 : 1;
+                return push_insn(t, t + insn_sz, FALLTHROUGH, env, false);
+        }
 
         switch (BPF_OP(insn->code)) {
         case BPF_EXIT:
@@ -15607,11 +15627,21 @@ static int check_cfg(struct bpf_verifier_env *env)
         }
 
         for (i = 0; i < insn_cnt; i++) {
+                struct bpf_insn *insn = &env->prog->insnsi[i];
+
                 if (insn_state[i] != EXPLORED) {
                         verbose(env, "unreachable insn %d\n", i);
                         ret = -EINVAL;
                         goto err_free;
                 }
+                if (bpf_is_ldimm64(insn)) {
+                        if (insn_state[i + 1] != 0) {
+                                verbose(env, "jump into the middle of ldimm64 insn %d\n", i);
+                                ret = -EINVAL;
+                                goto err_free;
+                        }
+                        i++; /* skip second half of ldimm64 */
+                }
         }
         ret = 0; /* cfg looks good */
 
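To make the get_prev_insn_idx() change above concrete, here is a small,
self-contained user-space sketch that replays the "3->4->5->3" loop case from
the new comment. The toy_state/jmp_entry types, the toy_prev_insn_idx() name
and the driver loop are invented for illustration; only the stop condition and
the history walk mirror the patched kernel helper.

        #include <errno.h>
        #include <stdio.h>

        struct jmp_entry { int prev_idx, idx; };

        struct toy_state {
                int first_insn_idx;
                struct jmp_entry jmp_history[4];
        };

        /* Same stop condition as the patched helper: when back at
         * first_insn_idx, we are only done if no jump history remains, or if
         * the single remaining entry is the parent-state -> this-state jump. */
        static int toy_prev_insn_idx(struct toy_state *st, int i, unsigned int *history)
        {
                unsigned int cnt = *history;

                if (i == st->first_insn_idx) {
                        if (cnt == 0)
                                return -ENOENT;
                        if (cnt == 1 && st->jmp_history[0].idx == i)
                                return -ENOENT;
                }
                if (cnt && st->jmp_history[cnt - 1].idx == i) {
                        i = st->jmp_history[cnt - 1].prev_idx;
                        (*history)--;
                } else {
                        i--; /* straight-line execution */
                }
                return i;
        }

        int main(void)
        {
                /* A state starting at insn 3 that looped 3->4->5->(jump)->3:
                 * entry 0 records the parent-state(3) -> state(3) jump from
                 * checkpointing, entry 1 the back-edge from insn 5 to insn 3. */
                struct toy_state st = {
                        .first_insn_idx = 3,
                        .jmp_history = { { .prev_idx = 3, .idx = 3 },
                                         { .prev_idx = 5, .idx = 3 } },
                };
                unsigned int hist = 2;
                int i = 3;

                while (i >= 0) { /* prints insns 3, 5, 4, 3 and then stops */
                        printf("backtracking insn %d\n", i);
                        i = toy_prev_insn_idx(&st, i, &hist);
                }
                return 0;
        }

With the pre-fix logic, the walk would have stopped at the very first visit of
insn 3 (i == first_idx) even though jump history was still pending; with the
fix it only stops once the remaining history is the parent-state jump.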
40 changes: 40 additions & 0 deletions tools/testing/selftests/bpf/progs/verifier_precision.c
@@ -91,3 +91,43 @@ __naked int bpf_end_bswap(void)
 }
 
 #endif /* v4 instruction */
+
+SEC("?raw_tp")
+__success __log_level(2)
+/*
+ * Without the bug fix there will be no history between "last_idx 3 first_idx 3"
+ * and "parent state regs=" lines. "R0_w=6" parts are here to help anchor
+ * expected log messages to the one specific mark_chain_precision operation.
+ *
+ * This is quite fragile: if verifier checkpointing heuristic changes, this
+ * might need adjusting.
+ */
+__msg("2: (07) r0 += 1 ; R0_w=6")
+__msg("3: (35) if r0 >= 0xa goto pc+1")
+__msg("mark_precise: frame0: last_idx 3 first_idx 3 subseq_idx -1")
+__msg("mark_precise: frame0: regs=r0 stack= before 2: (07) r0 += 1")
+__msg("mark_precise: frame0: regs=r0 stack= before 1: (07) r0 += 1")
+__msg("mark_precise: frame0: regs=r0 stack= before 4: (05) goto pc-4")
+__msg("mark_precise: frame0: regs=r0 stack= before 3: (35) if r0 >= 0xa goto pc+1")
+__msg("mark_precise: frame0: parent state regs= stack=: R0_rw=P4")
+__msg("3: R0_w=6")
+__naked int state_loop_first_last_equal(void)
+{
+        asm volatile (
+                "r0 = 0;"
+        "l0_%=:"
+                "r0 += 1;"
+                "r0 += 1;"
+                /* every few iterations we'll have a checkpoint here with
+                 * first_idx == last_idx, potentially confusing precision
+                 * backtracking logic
+                 */
+                "if r0 >= 10 goto l1_%=;" /* checkpoint + mark_precise */
+                "goto l0_%=;"
+        "l1_%=:"
+                "exit;"
+                ::: __clobber_common
+        );
+}
+
+char _license[] SEC("license") = "GPL";
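For reference, the instruction indices used by the __msg() expectations above
map onto the test body roughly as follows, assuming the straightforward
one-statement-per-instruction lowering of the inline asm (this annotation is
illustrative and not part of the patch):

        /*  idx  insn
         *   0   r0 = 0
         *   1   r0 += 1                  <- l0_%=
         *   2   r0 += 1                  ("2: (07) r0 += 1 ; R0_w=6")
         *   3   if r0 >= 0xa goto pc+1   (checkpoint + mark_precise)
         *   4   goto pc-4                (back to insn 1, i.e. l0_%=)
         *   5   exit                     <- l1_%=
         */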
8 changes: 4 additions & 4 deletions tools/testing/selftests/bpf/verifier/ld_imm64.c
@@ -9,8 +9,8 @@
         BPF_MOV64_IMM(BPF_REG_0, 2),
         BPF_EXIT_INSN(),
         },
-        .errstr = "invalid BPF_LD_IMM insn",
-        .errstr_unpriv = "R1 pointer comparison",
+        .errstr = "jump into the middle of ldimm64 insn 1",
+        .errstr_unpriv = "jump into the middle of ldimm64 insn 1",
         .result = REJECT,
 },
 {
@@ -23,8 +23,8 @@
         BPF_LD_IMM64(BPF_REG_0, 1),
         BPF_EXIT_INSN(),
         },
-        .errstr = "invalid BPF_LD_IMM insn",
-        .errstr_unpriv = "R1 pointer comparison",
+        .errstr = "jump into the middle of ldimm64 insn 1",
+        .errstr_unpriv = "jump into the middle of ldimm64 insn 1",
         .result = REJECT,
 },
 {
