From: Abhishek Dubey <[email protected]>

Move the long branch address space to the bottom of the long
branch stub. This allows uninterrupted disassembly until the
last 8 bytes. Exclude these last bytes from the overall
program length to prevent failure in assembly generation.
Also, align the dummy_tramp_addr field to an 8-byte boundary.

Following is disassembler output for test program with moved down
dummy_tramp_addr field:
.....
.....
pc:68    left:44     a6 03 08 7c  :  mtlr 0
pc:72    left:40     bc ff ff 4b  :  b .-68
pc:76    left:36     a6 02 68 7d  :  mflr 11
pc:80    left:32     05 00 9f 42  :  bcl 20, 31, .+4
pc:84    left:28     a6 02 88 7d  :  mflr 12
pc:88    left:24     14 00 8c e9  :  ld 12, 20(12)
pc:92    left:20     a6 03 89 7d  :  mtctr 12
pc:96    left:16     a6 03 68 7d  :  mtlr 11
pc:100   left:12     20 04 80 4e  :  bctr
pc:104   left:8      c0 34 1d 00  :

Failure log:
Can't disasm instruction at offset 104: c0 34 1d 00 00 00 00 c0
Disassembly logic can truncate at 104, ignoring last 8 bytes.

Update the dummy_tramp_addr field offset calculation from the end
of the program to reflect its new location, for bpf_arch_text_poke()
to update the actual trampoline's address in this field.

All BPF trampoline selftests continue to pass with this patch applied.

Signed-off-by: Abhishek Dubey <[email protected]>
---
 arch/powerpc/net/bpf_jit_comp.c | 42 +++++++++++++++++++--------------
 1 file changed, 24 insertions(+), 18 deletions(-)

diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index c255b30a37b0..c2ac4e355464 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -58,20 +58,22 @@ void bpf_jit_build_fentry_stubs(u32 *image, u32 *fimage, struct codegen_context
         * in the fimage. The alignment NOP must appear before OOL stub,
         * to make ool_stub_idx & long_branch_stub_idx constant from end.
         *
+        * The dummy_tramp_addr field is placed at bottom of Long branch stub.
+        *
         * Need alignment NOP in following conditions:
         *
         * OOL stub aligned     CONFIG_PPC_FTRACE_OUT_OF_LINE   Alignment NOP
-        *      Y                               Y                     N
-        *      Y                               N                     Y
-        *      N                               Y                     Y
-        *      N                               N                     N
+        *      Y                               Y                     Y
+        *      Y                               N                     N
+        *      N                               Y                     N
+        *      N                               N                     Y
         */
 #ifdef CONFIG_PPC64
        if (fimage && image) {
                unsigned long pc = (unsigned long)fimage + CTX_NIA(ctx);
 
-               if (IS_ALIGNED(pc, 8) ^
-                       IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE))
+               if (!(IS_ALIGNED(pc, 8) ^
+                       IS_ENABLED(CONFIG_PPC_FTRACE_OUT_OF_LINE)))
                        EMIT(PPC_RAW_NOP());
        }
 #endif
@@ -93,28 +95,29 @@ void bpf_jit_build_fentry_stubs(u32 *image, u32 *fimage, struct codegen_context
 
        /*
         * Long branch stub:
-        *      .long   <dummy_tramp_addr>  // 8-byte aligned
         *      mflr    r11
         *      bcl     20,31,$+4
-        *      mflr    r12
-        *      ld      r12, -8-SZL(r12)
+        *      mflr    r12     // lr/r12 stores pc of current(this) inst.
+        *      ld      r12, 20(r12) // offset(dummy_tramp_addr) from prev inst. is 20
         *      mtctr   r12
-        *      mtlr    r11 // needed to retain ftrace ABI
+        *      mtlr    r11     // needed to retain ftrace ABI
         *      bctr
+        *      .long   <dummy_tramp_addr>  // 8-byte aligned
         */
-       if (image)
-               *((unsigned long *)&image[ctx->idx]) = (unsigned long)dummy_tramp;
-
-       ctx->idx += SZL / 4;
        long_branch_stub_idx = ctx->idx;
        EMIT(PPC_RAW_MFLR(_R11));
        EMIT(PPC_RAW_BCL4());
        EMIT(PPC_RAW_MFLR(_R12));
-       EMIT(PPC_RAW_LL(_R12, _R12, -8-SZL));
+       EMIT(PPC_RAW_LL(_R12, _R12, 20));
        EMIT(PPC_RAW_MTCTR(_R12));
        EMIT(PPC_RAW_MTLR(_R11));
        EMIT(PPC_RAW_BCTR());
 
+       if (image)
+               *((unsigned long *)&image[ctx->idx]) = (unsigned long)dummy_tramp;
+
+       ctx->idx += SZL / 4;
+
        if (!bpf_jit_ool_stub) {
                bpf_jit_ool_stub = (ctx->idx - ool_stub_idx) * 4;
                bpf_jit_long_branch_stub = (ctx->idx - long_branch_stub_idx) * 4;
@@ -1155,6 +1158,7 @@ static void do_isync(void *info __maybe_unused)
  * bpf_func:
  *     [nop|b] ool_stub
  * 2. Out-of-line stub:
+ *     nop     // optional nop for alignment
  * ool_stub:
  *     mflr    r0
  *     [b|bl]  <bpf_prog>/<long_branch_stub>
@@ -1162,14 +1166,14 @@ static void do_isync(void *info __maybe_unused)
  *     b       bpf_func + 4
  * 3. Long branch stub:
  * long_branch_stub:
- *     .long   <branch_addr>/<dummy_tramp>
  *     mflr    r11
  *     bcl     20,31,$+4
  *     mflr    r12
- *     ld      r12, -16(r12)
+ *     ld      r12, 20(r12)
  *     mtctr   r12
  *     mtlr    r11 // needed to retain ftrace ABI
  *     bctr
+ *     .long   <branch_addr>/<dummy_tramp>
  *
  * dummy_tramp is used to reduce synchronization requirements.
  *
@@ -1271,10 +1275,12 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type old_t,
         * 1. Update the address in the long branch stub:
         * If new_addr is out of range, we will have to use the long branch stub, so patch new_addr
         * here. Otherwise, revert to dummy_tramp, but only if we had patched old_addr here.
+        *
+        * dummy_tramp_addr moved to bottom of long branch stub.
         */
        if ((new_addr && !is_offset_in_branch_range(new_addr - ip)) ||
            (old_addr && !is_offset_in_branch_range(old_addr - ip)))
-               ret = patch_ulong((void *)(bpf_func_end - bpf_jit_long_branch_stub - SZL),
+               ret = patch_ulong((void *)(bpf_func_end - SZL), /* SZL: dummy_tramp_addr offset */
                                  (new_addr && !is_offset_in_branch_range(new_addr - ip)) ?
                                  (unsigned long)new_addr : (unsigned long)dummy_tramp);
        if (ret)
-- 
2.52.0


Reply via email to