This patch migrates 32-bit zero extension insertion to the new list patching
infrastructure.

Signed-off-by: Jiong Wang <jiong.w...@netronome.com>
---
 kernel/bpf/verifier.c | 45 +++++++++++++++++++++++++--------------------
 1 file changed, 25 insertions(+), 20 deletions(-)

diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 30ed28e..58d6bbe 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -8549,10 +8549,9 @@ static int opt_subreg_zext_lo32_rnd_hi32(struct 
bpf_verifier_env *env,
                                         const union bpf_attr *attr)
 {
        struct bpf_insn *patch, zext_patch[2], rnd_hi32_patch[4];
-       struct bpf_insn_aux_data *aux = env->insn_aux_data;
-       int i, patch_len, delta = 0, len = env->prog->len;
-       struct bpf_insn *insns = env->prog->insnsi;
-       struct bpf_prog *new_prog;
+       struct bpf_list_insn *list, *elem;
+       struct bpf_insn_aux_data *aux;
+       int patch_len, ret = 0;
        bool rnd_hi32;
 
        rnd_hi32 = attr->prog_flags & BPF_F_TEST_RND_HI32;
@@ -8560,12 +8559,16 @@ static int opt_subreg_zext_lo32_rnd_hi32(struct 
bpf_verifier_env *env,
        rnd_hi32_patch[1] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, 0);
        rnd_hi32_patch[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);
        rnd_hi32_patch[3] = BPF_ALU64_REG(BPF_OR, 0, BPF_REG_AX);
-       for (i = 0; i < len; i++) {
-               int adj_idx = i + delta;
-               struct bpf_insn insn;
 
-               insn = insns[adj_idx];
-               if (!aux[adj_idx].zext_dst) {
+       list = bpf_create_list_insn(env->prog);
+       if (IS_ERR(list))
+               return PTR_ERR(list);
+
+       for (elem = list; elem; elem = elem->next) {
+               struct bpf_insn insn = elem->insn;
+
+               aux = &env->insn_aux_data[elem->orig_idx - 1];
+               if (!aux->zext_dst) {
                        u8 code, class;
                        u32 imm_rnd;
 
@@ -8584,13 +8587,13 @@ static int opt_subreg_zext_lo32_rnd_hi32(struct 
bpf_verifier_env *env,
                        if (is_reg64(env, &insn, insn.dst_reg, NULL, DST_OP)) {
                                if (class == BPF_LD &&
                                    BPF_MODE(code) == BPF_IMM)
-                                       i++;
+                                       elem = elem->next;
                                continue;
                        }
 
                        /* ctx load could be transformed into wider load. */
                        if (class == BPF_LDX &&
-                           aux[adj_idx].ptr_type == PTR_TO_CTX)
+                           aux->ptr_type == PTR_TO_CTX)
                                continue;
 
                        imm_rnd = get_random_int();
@@ -8611,16 +8614,18 @@ static int opt_subreg_zext_lo32_rnd_hi32(struct 
bpf_verifier_env *env,
                patch = zext_patch;
                patch_len = 2;
 apply_patch_buffer:
-               new_prog = bpf_patch_insn_data(env, adj_idx, patch, patch_len);
-               if (!new_prog)
-                       return -ENOMEM;
-               env->prog = new_prog;
-               insns = new_prog->insnsi;
-               aux = env->insn_aux_data;
-               delta += patch_len - 1;
+               elem = bpf_patch_list_insn(elem, patch, patch_len);
+               if (IS_ERR(elem)) {
+                       ret = PTR_ERR(elem);
+                       goto free_list_ret;
+               }
        }
-
-       return 0;
+       env = verifier_linearize_list_insn(env, list);
+       if (IS_ERR(env))
+               ret = PTR_ERR(env);
+free_list_ret:
+       bpf_destroy_list_insn(list);
+       return ret;
 }
 
 /* convert load instructions that access fields of a context type into a
-- 
2.7.4

Reply via email to