When a BPF program started with a conditional jump, only one (the
true) execution branch of the program was evaluated. Any instructions
jumped over were not evaluated and could contain invalid operations.
The root cause was using instruction index zero as the signal for
ending evaluation when backtracking.

Switch from tracking execution history with the previous instruction
index to tracking it with a previous instruction pointer. The first
instruction will not have it set, so backtracking _from_ it ends
evaluation, rather than backtracking _to_ it as before.
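
The difference can be shown with a minimal, self-contained sketch
(illustration only, not the lib/bpf code; the node layout and edge
bookkeeping here are simplified assumptions): backtracking ends only
when stepping back from a node whose prev pointer was never set, so
every edge of the first node is still evaluated.

    /*
     * Illustration only (not the lib/bpf code): a simplified DFS walk
     * whose backtracking ends when the first node's unset (NULL) prev
     * pointer is reached, so every edge of the first node is visited.
     */
    #include <stdio.h>
    #include <stddef.h>

    struct node {
        int id;
        int edges_left;      /* outgoing edges not followed yet */
        struct node *prev;   /* stays NULL for the first node */
    };

    int main(void)
    {
        /* node 0 is a conditional jump: falls through to 1, jumps to 2 */
        struct node in[3] = {
            { .id = 0, .edges_left = 2, .prev = NULL },
            { .id = 1, .edges_left = 1 },
            { .id = 2, .edges_left = 0 },
        };
        struct node *node = &in[0];

        while (node != NULL) {
            if (node->edges_left != 0) {
                /* descend into the next unexplored child */
                int e = --node->edges_left;
                struct node *next = &in[node->id + 1 + e];
                next->prev = node;
                node = next;
                printf("evaluate node %d\n", node->id);
            } else {
                /*
                 * All children done: backtrack. Only stepping back from
                 * node 0 yields NULL and ends the walk; the old scheme
                 * stopped as soon as it backtracked *to* node 0, which
                 * skipped its remaining fall-through edge to node 1.
                 */
                node = node->prev;
            }
        }
        return 0;
    }
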
Add two tests demonstrating the problem:
* test_jump_over_invalid_first: loads a BPF program with a
  conditional jump over an invalid operation, should not succeed;
* test_jump_over_invalid_non_first: the same program with one extra
  instruction at the start, to demonstrate that it is indeed invalid
  (and also to guard against another kind of regression).

Fixes: 6e12ec4c4d ("bpf: add more checks")
Signed-off-by: Marat Khalili <[email protected]>
---
app/test/test_bpf.c | 80 ++++++++++++++++++++++++++++++++++++++++++
lib/bpf/bpf_validate.c | 20 ++++-------
2 files changed, 86 insertions(+), 14 deletions(-)
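
Run note (not part of the commit message): assuming the usual
app/test invocation, the new autotests should be runnable directly
from the dpdk-test binary, e.g.

    DPDK_TEST=bpf_jump_over_invalid_first_autotest ./dpdk-test --no-huge

where the binary path and EAL options depend on the local build.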
diff --git a/app/test/test_bpf.c b/app/test/test_bpf.c
index c25f5a6f4d..3f04cb28be 100644
--- a/app/test/test_bpf.c
+++ b/app/test/test_bpf.c
@@ -208,6 +208,86 @@ test_subtract_one(void)
REGISTER_FAST_TEST(bpf_subtract_one_autotest, true, true, test_subtract_one);
+/*
+ * Conditionally jump over invalid operation as first instruction.
+ */
+static int
+test_jump_over_invalid_first(void)
+{
+ static const struct ebpf_insn ins[] = {
+ {
+ /* Jump over the next instruction for some r1. */
+ .code = (BPF_JMP | BPF_JEQ | BPF_K),
+ .dst_reg = EBPF_REG_1,
+ .imm = 42,
+ .off = 1,
+ },
+ {
+ /* Write 0xDEADBEEF to [r1 + INT16_MIN]. */
+ .code = (BPF_ST | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .off = INT16_MIN,
+ .imm = 0xDEADBEEF,
+ },
+ {
+ /* Set return value to the program argument. */
+ .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
+ .src_reg = EBPF_REG_1,
+ .dst_reg = EBPF_REG_0,
+ },
+ {
+ .code = (BPF_JMP | EBPF_EXIT),
+ },
+ };
+ return bpf_load_test(RTE_DIM(ins), ins, EINVAL);
+}
+
+REGISTER_FAST_TEST(bpf_jump_over_invalid_first_autotest, true, true,
+ test_jump_over_invalid_first);
+
+/*
+ * Conditionally jump over invalid operation as non-first instruction.
+ */
+static int
+test_jump_over_invalid_non_first(void)
+{
+ static const struct ebpf_insn ins[] = {
+ {
+ /* Set return value to the program argument. */
+ .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
+ .src_reg = EBPF_REG_1,
+ .dst_reg = EBPF_REG_0,
+ },
+ {
+ /* Jump over the next instruction for some r1. */
+ .code = (BPF_JMP | BPF_JEQ | BPF_K),
+ .dst_reg = EBPF_REG_1,
+ .imm = 42,
+ .off = 1,
+ },
+ {
+ /* Write 0xDEADBEEF to [r1 + INT16_MIN]. */
+ .code = (BPF_ST | BPF_MEM | EBPF_DW),
+ .dst_reg = EBPF_REG_1,
+ .off = INT16_MIN,
+ .imm = 0xDEADBEEF,
+ },
+ {
+ /* Set return value to the program argument. */
+ .code = (EBPF_ALU64 | EBPF_MOV | BPF_X),
+ .src_reg = EBPF_REG_1,
+ .dst_reg = EBPF_REG_0,
+ },
+ {
+ .code = (BPF_JMP | EBPF_EXIT),
+ },
+ };
+ return bpf_load_test(RTE_DIM(ins), ins, EINVAL);
+}
+
+REGISTER_FAST_TEST(bpf_jump_over_invalid_non_first_autotest, true, true,
+ test_jump_over_invalid_non_first);
+
/*
* Basic functional tests for librte_bpf.
* The main procedure - load eBPF program, execute it and
diff --git a/lib/bpf/bpf_validate.c b/lib/bpf/bpf_validate.c
index 47ad6fef0f..64a8f227a3 100644
--- a/lib/bpf/bpf_validate.c
+++ b/lib/bpf/bpf_validate.c
@@ -64,7 +64,7 @@ struct inst_node {
uint8_t cur_edge:4;
uint8_t edge_type[MAX_EDGES];
uint32_t edge_dest[MAX_EDGES];
- uint32_t prev_node;
+ struct inst_node *prev_node;
struct {
struct bpf_eval_state *cur; /* save/restore for jcc targets */
struct bpf_eval_state *start;
@@ -1875,12 +1875,6 @@ set_edge_type(struct bpf_verifier *bvf, struct inst_node *node,
bvf->edge_type[type]++;
}
-static struct inst_node *
-get_prev_node(struct bpf_verifier *bvf, struct inst_node *node)
-{
- return bvf->in + node->prev_node;
-}
-
/*
* Depth-First Search (DFS) through previously constructed
* Control Flow Graph (CFG).
@@ -1916,7 +1910,7 @@ dfs(struct bpf_verifier *bvf)
if (next != NULL) {
/* proceed with next child */
- next->prev_node = get_node_idx(bvf, node);
+ next->prev_node = node;
node = next;
} else {
/*
@@ -1925,7 +1919,7 @@ dfs(struct bpf_verifier *bvf)
*/
set_node_colour(bvf, node, BLACK);
node->cur_edge = 0;
- node = get_prev_node(bvf, node);
+ node = node->prev_node;
}
} else
node = NULL;
@@ -2490,7 +2484,7 @@ evaluate(struct bpf_verifier *bvf)
next = NULL;
stats.nb_prune++;
} else {
- next->prev_node = get_node_idx(bvf, node);
+ next->prev_node = node;
node = next;
}
} else {
@@ -2501,11 +2495,9 @@ evaluate(struct bpf_verifier *bvf)
*/
node->cur_edge = 0;
save_safe_eval_state(bvf, node);
- node = get_prev_node(bvf, node);
+ node = node->prev_node;
- /* finished */
- if (node == bvf->in)
- node = NULL;
+ /* first node will not have prev, signalling finish */
}
}
--
2.43.0