Implement code-gen for the new JMP32 instructions on arm.

Cc: Shubham Bansal <illusionist....@gmail.com>
Signed-off-by: Jiong Wang <jiong.w...@netronome.com>
---
 arch/arm/net/bpf_jit_32.c | 23 +++++++++++++++++------
 1 file changed, 17 insertions(+), 6 deletions(-)

diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 25b3ee8..e9830a7 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -1083,7 +1083,8 @@ static inline void emit_ldx_r(const s8 dst[], const s8 src,
 
 /* Arithmatic Operation */
 static inline void emit_ar_r(const u8 rd, const u8 rt, const u8 rm,
-                            const u8 rn, struct jit_ctx *ctx, u8 op) {
+                            const u8 rn, struct jit_ctx *ctx, u8 op,
+                            bool is_jmp64) {
        switch (op) {
        case BPF_JSET:
                emit(ARM_AND_R(ARM_IP, rt, rn), ctx);
@@ -1096,18 +1097,25 @@ static inline void emit_ar_r(const u8 rd, const u8 rt, const u8 rm,
        case BPF_JGE:
        case BPF_JLE:
        case BPF_JLT:
-               emit(ARM_CMP_R(rd, rm), ctx);
-               _emit(ARM_COND_EQ, ARM_CMP_R(rt, rn), ctx);
+               if (is_jmp64) {
+                       emit(ARM_CMP_R(rd, rm), ctx);
+                       /* Only compare low halves if high halves are equal. */
+                       _emit(ARM_COND_EQ, ARM_CMP_R(rt, rn), ctx);
+               } else {
+                       emit(ARM_CMP_R(rt, rn), ctx);
+               }
                break;
        case BPF_JSLE:
        case BPF_JSGT:
                emit(ARM_CMP_R(rn, rt), ctx);
-               emit(ARM_SBCS_R(ARM_IP, rm, rd), ctx);
+               if (is_jmp64)
+                       emit(ARM_SBCS_R(ARM_IP, rm, rd), ctx);
                break;
        case BPF_JSLT:
        case BPF_JSGE:
                emit(ARM_CMP_R(rt, rn), ctx);
-               emit(ARM_SBCS_R(ARM_IP, rd, rm), ctx);
+               if (is_jmp64)
+                       emit(ARM_SBCS_R(ARM_IP, rd, rm), ctx);
                break;
        }
 }
@@ -1326,6 +1334,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
        const s8 *rd, *rs;
        s8 rd_lo, rt, rm, rn;
        s32 jmp_offset;
+       bool is_jmp64;
 
 #define check_imm(bits, imm) do {                              \
        if ((imm) >= (1 << ((bits) - 1)) ||                     \
@@ -1615,6 +1624,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
        case BPF_JMP | BPF_JLT | BPF_X:
        case BPF_JMP | BPF_JSLT | BPF_X:
        case BPF_JMP | BPF_JSLE | BPF_X:
+               is_jmp64 = !imm;
                /* Setup source registers */
                rm = arm_bpf_get_reg32(src_hi, tmp2[0], ctx);
                rn = arm_bpf_get_reg32(src_lo, tmp2[1], ctx);
@@ -1641,6 +1651,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
        case BPF_JMP | BPF_JLE | BPF_K:
        case BPF_JMP | BPF_JSLT | BPF_K:
        case BPF_JMP | BPF_JSLE | BPF_K:
+               is_jmp64 = !insn->src_reg;
                if (off == 0)
                        break;
                rm = tmp2[0];
@@ -1652,7 +1663,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
                rd = arm_bpf_get_reg64(dst, tmp, ctx);
 
                /* Check for the condition */
-               emit_ar_r(rd[0], rd[1], rm, rn, ctx, BPF_OP(code));
+               emit_ar_r(rd[0], rd[1], rm, rn, ctx, BPF_OP(code), is_jmp64);
 
                /* Setup JUMP instruction */
                jmp_offset = bpf2a32_offset(i+off, i, ctx);
-- 
2.7.4

Reply via email to