The list linearization function will figure out the new jump
destinations of patched/blinded jumps from the final instruction
order, so bpf_jit_blind_insn no longer needs to adjust the offset of
backward jumps to account for the inserted instructions.

Signed-off-by: Jiong Wang <jiong.w...@netronome.com>
---
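[ Note for reviewers reading this patch out of context: the list-insn
  helpers used below are introduced earlier in this series.  The sketch
  here only mirrors how this patch uses them; field and parameter names
  are assumptions for illustration, not the authoritative definitions. ]

  struct bpf_list_insn {
  	struct bpf_insn insn;		/* payload instruction */
  	struct bpf_list_insn *next;	/* NULL terminates the list */
  	/* plus jump-destination bookkeeping owned by the linearizer */
  };

  /* Turn prog->insnsi into a list; returns ERR_PTR() on failure. */
  struct bpf_list_insn *bpf_create_list_insn(struct bpf_prog *prog);

  /* Replace @elem with @len insns from @patch; returns the node to
   * continue iterating from, or ERR_PTR() on failure.
   */
  struct bpf_list_insn *bpf_patch_list_insn(struct bpf_list_insn *elem,
  					    const struct bpf_insn *patch,
  					    u32 len);

  /* Write the list back into @prog, renumbering the insns and
   * recomputing every jump offset from the final positions.
   */
  struct bpf_prog *bpf_linearize_list_insn(struct bpf_prog *prog,
  					  struct bpf_list_insn *list);

  void bpf_destroy_list_insn(struct bpf_list_insn *list);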
 kernel/bpf/core.c | 76 ++++++++++++++++++++++++++-----------------------------
 1 file changed, 36 insertions(+), 40 deletions(-)
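[ Background on the deleted "off -= 2" lines: blinding a
  BPF_JMP*|BPF_K insn emits a MOV and a XOR in front of the replacement
  jump, so in the flat-array model the jump moved two slots away from
  any backward target, while a forward target shifted by the same two
  slots and kept its relative offset.  The linearizer makes this case
  analysis unnecessary by renumbering all nodes and recomputing each
  offset.  A minimal sketch of that recomputation; the helper name is
  made up for illustration: ]

  /* BPF jump semantics: the next insn executed is src_idx + 1 + off,
   * so once linearization has assigned final indices the offset is:
   */
  static s16 jump_off_from_final_idx(u32 src_idx, u32 dst_idx)
  {
  	return dst_idx - src_idx - 1;
  }

  /* Old flat-array case for comparison: a jump at index i with off < 0
   * ends up at i + 2 after MOV+XOR are inserted, while its target stays
   * at i + 1 + off; the needed offset becomes
   * (i + 1 + off) - (i + 2) - 1 = off - 2.
   */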

diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index e60703e..c3a5f84 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -1162,7 +1162,6 @@ static int bpf_jit_blind_insn(const struct bpf_insn *from,
 {
        struct bpf_insn *to = to_buff;
        u32 imm_rnd = get_random_int();
-       s16 off;
 
        BUILD_BUG_ON(BPF_REG_AX  + 1 != MAX_BPF_JIT_REG);
        BUILD_BUG_ON(MAX_BPF_REG + 1 != MAX_BPF_JIT_REG);
@@ -1234,13 +1233,10 @@ static int bpf_jit_blind_insn(const struct bpf_insn *from,
        case BPF_JMP | BPF_JSGE | BPF_K:
        case BPF_JMP | BPF_JSLE | BPF_K:
        case BPF_JMP | BPF_JSET | BPF_K:
-               /* Accommodate for extra offset in case of a backjump. */
-               off = from->off;
-               if (off < 0)
-                       off -= 2;
                *to++ = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
                *to++ = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
-               *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX, off);
+               *to++ = BPF_JMP_REG(from->code, from->dst_reg, BPF_REG_AX,
+                                   from->off);
                break;
 
        case BPF_JMP32 | BPF_JEQ  | BPF_K:
@@ -1254,14 +1250,10 @@ static int bpf_jit_blind_insn(const struct bpf_insn *from,
        case BPF_JMP32 | BPF_JSGE | BPF_K:
        case BPF_JMP32 | BPF_JSLE | BPF_K:
        case BPF_JMP32 | BPF_JSET | BPF_K:
-               /* Accommodate for extra offset in case of a backjump. */
-               off = from->off;
-               if (off < 0)
-                       off -= 2;
                *to++ = BPF_ALU32_IMM(BPF_MOV, BPF_REG_AX, imm_rnd ^ from->imm);
                *to++ = BPF_ALU32_IMM(BPF_XOR, BPF_REG_AX, imm_rnd);
                *to++ = BPF_JMP32_REG(from->code, from->dst_reg, BPF_REG_AX,
-                                     off);
+                                     from->off);
                break;
 
        case BPF_LD | BPF_IMM | BPF_DW:
@@ -1332,10 +1324,9 @@ void bpf_jit_prog_release_other(struct bpf_prog *fp, struct bpf_prog *fp_other)
 struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
 {
        struct bpf_insn insn_buff[16], aux[2];
-       struct bpf_prog *clone, *tmp;
-       int insn_delta, insn_cnt;
-       struct bpf_insn *insn;
-       int i, rewritten;
+       struct bpf_list_insn *list, *elem;
+       struct bpf_prog *clone, *ret_prog;
+       int rewritten;
 
        if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
                return prog;
@@ -1344,43 +1335,48 @@ struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
        if (!clone)
                return ERR_PTR(-ENOMEM);
 
-       insn_cnt = clone->len;
-       insn = clone->insnsi;
+       list = bpf_create_list_insn(clone);
+       if (IS_ERR(list))
+               return (struct bpf_prog *)list;
+
+       /* kill uninitialized warning on some gcc versions. */
+       memset(&aux, 0, sizeof(aux));
+
+       for (elem = list; elem; elem = elem->next) {
+               struct bpf_list_insn *next = elem->next;
+               struct bpf_insn insn = elem->insn;
 
-       for (i = 0; i < insn_cnt; i++, insn++) {
                /* We temporarily need to hold the original ld64 insn
                 * so that we can still access the first part in the
                 * second blinding run.
                 */
-               if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW) &&
-                   insn[1].code == 0)
-                       memcpy(aux, insn, sizeof(aux));
+               if (insn.code == (BPF_LD | BPF_IMM | BPF_DW)) {
+                       struct bpf_insn next_insn = next->insn;
 
-               rewritten = bpf_jit_blind_insn(insn, aux, insn_buff);
+                       if (next_insn.code == 0) {
+                               aux[0] = insn;
+                               aux[1] = next_insn;
+                       }
+               }
+
+               rewritten = bpf_jit_blind_insn(&insn, aux, insn_buff);
                if (!rewritten)
                        continue;
 
-               tmp = bpf_patch_insn_single(clone, i, insn_buff, rewritten);
-               if (IS_ERR(tmp)) {
-                       /* Patching may have repointed aux->prog during
-                        * realloc from the original one, so we need to
-                        * fix it up here on error.
-                        */
-                       bpf_jit_prog_release_other(prog, clone);
-                       return tmp;
+               elem = bpf_patch_list_insn(elem, insn_buff, rewritten);
+               if (IS_ERR(elem)) {
+                       ret_prog = (struct bpf_prog *)elem;
+                       goto free_list_ret;
                }
-
-               clone = tmp;
-               insn_delta = rewritten - 1;
-
-               /* Walk new program and skip insns we just inserted. */
-               insn = clone->insnsi + i + insn_delta;
-               insn_cnt += insn_delta;
-               i        += insn_delta;
        }
 
-       clone->blinded = 1;
-       return clone;
+       clone = bpf_linearize_list_insn(clone, list);
+       if (!IS_ERR(clone))
+               clone->blinded = 1;
+       ret_prog = clone;
+free_list_ret:
+       bpf_destroy_list_insn(list);
+       return ret_prog;
 }
 #endif /* CONFIG_BPF_JIT */
 
-- 
2.7.4
