Prefetch-with-intent-to-write is currently part of the XADD mapping in
the AArch64 JIT and follows the kernel's implementation of atomic_add.
This may interfere with other threads executing the LDXR/STXR loop,
leading to potential starvation and fairness issues. Drop the optional
prefetch instruction.
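
For reference, the sequence emitted for BPF_XADD is roughly the
following (a sketch, not the JIT's literal output; x_tmp2/w_tmp3/src
stand in for the JIT's scratch and source registers, and the loop-back
branch is emitted by code outside the hunk below):

     prfm pstl1strm, [tmp]       // dropped by this patch
  1: ldxr x_tmp2, [tmp]          // load-exclusive of the target word
     add  x_tmp2, x_tmp2, src    // add the BPF source register
     stxr w_tmp3, x_tmp2, [tmp]  // store-exclusive, status in w_tmp3
     cbnz w_tmp3, 1b             // exclusivity lost, retry the loop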

Fixes: 85f68fe89832 ("bpf, arm64: implement jiting of BPF_XADD")
Reported-by: Will Deacon <will.dea...@arm.com>
Signed-off-by: Daniel Borkmann <dan...@iogearbox.net>
Acked-by: Jean-Philippe Brucker <jean-philippe.bruc...@arm.com>
Acked-by: Will Deacon <will.dea...@arm.com>
---
 arch/arm64/net/bpf_jit.h      | 6 ------
 arch/arm64/net/bpf_jit_comp.c | 1 -
 2 files changed, 7 deletions(-)

diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
index 783de51..6c88165 100644
--- a/arch/arm64/net/bpf_jit.h
+++ b/arch/arm64/net/bpf_jit.h
@@ -100,12 +100,6 @@
 #define A64_STXR(sf, Rt, Rn, Rs) \
        A64_LSX(sf, Rt, Rn, Rs, STORE_EX)
 
-/* Prefetch */
-#define A64_PRFM(Rn, type, target, policy) \
-       aarch64_insn_gen_prefetch(Rn, AARCH64_INSN_PRFM_TYPE_##type, \
-                                 AARCH64_INSN_PRFM_TARGET_##target, \
-                                 AARCH64_INSN_PRFM_POLICY_##policy)
-
 /* Add/subtract (immediate) */
 #define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \
        aarch64_insn_gen_add_sub_imm(Rd, Rn, imm12, \
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index aaddc02..a1420626 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -762,7 +762,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
        case BPF_STX | BPF_XADD | BPF_DW:
                emit_a64_mov_i(1, tmp, off, ctx);
                emit(A64_ADD(1, tmp, tmp, dst), ctx);
-               emit(A64_PRFM(tmp, PST, L1, STRM), ctx);
                emit(A64_LDXR(isdw, tmp2, tmp), ctx);
                emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
                emit(A64_STXR(isdw, tmp2, tmp, tmp3), ctx);
-- 
2.9.5
