commit:     27474f966c5c419ea4fd3a7cb050071d0f9d84c6
Author:     Raymond Wong <infiwang <AT> pm <DOT> me>
AuthorDate: Sun May  8 15:57:29 2022 +0000
Commit:     Jakov Smolić <jsmolic <AT> gentoo <DOT> org>
CommitDate: Sun May  8 19:27:55 2022 +0000
URL:        https://gitweb.gentoo.org/repo/gentoo.git/commit/?id=27474f96

dev-libs/ffcall-2.4: add vacall PIC support for Linux/riscv

Patch is already done upstream, drop on next version bump.

Closes: https://bugs.gentoo.org/842915
Signed-off-by: Raymond Wong <infiwang <AT> pm.me>
Closes: https://github.com/gentoo/gentoo/pull/25388
Signed-off-by: Jakov Smolić <jsmolic <AT> gentoo.org>

 dev-libs/ffcall/ffcall-2.4.ebuild                  |   7 +-
 .../ffcall/files/ffcall-2.4-vacall-riscv-pic.patch | 792 +++++++++++++++++++++
 2 files changed, 798 insertions(+), 1 deletion(-)

diff --git a/dev-libs/ffcall/ffcall-2.4.ebuild b/dev-libs/ffcall/ffcall-2.4.ebuild
index 9e5e111bec68..8a30bf885b1d 100644
--- a/dev-libs/ffcall/ffcall-2.4.ebuild
+++ b/dev-libs/ffcall/ffcall-2.4.ebuild
@@ -19,6 +19,11 @@ LICENSE="GPL-2+"
 SLOT="0"
 KEYWORDS="~alpha amd64 ~arm64 ~hppa ~ia64 ~mips ppc ppc64 sparc x86 ~amd64-linux ~x86-linux ~ppc-macos ~sparc-solaris ~x86-solaris"
 
+PATCHES=(
+       # bug 842915, drop on next version bump
+       "${FILESDIR}"/${PN}-2.4-vacall-riscv-pic.patch
+)
+
 src_prepare() {
        # The build system is a strange mix of autogenerated
        # files and manual tweaks on top. Uses $CFLAGS / $LDFLAGS randomly.
@@ -33,7 +38,7 @@ src_prepare() {
                        -i "${mfi}" || die
        done
 
-       eapply_user
+       default
 }
 
 src_configure() {

diff --git a/dev-libs/ffcall/files/ffcall-2.4-vacall-riscv-pic.patch b/dev-libs/ffcall/files/ffcall-2.4-vacall-riscv-pic.patch
new file mode 100644
index 000000000000..2bad9a403997
--- /dev/null
+++ b/dev-libs/ffcall/files/ffcall-2.4-vacall-riscv-pic.patch
@@ -0,0 +1,792 @@
+https://bugs.gentoo.org/842915
+Taken from https://alpha.gnu.org/gnu/libffcall/libffcall-2.4-20220508.tar.gz
+
+diff -ruN libffcall-2.4/vacall/Makefile.devel 
libffcall-2.4-20220508/vacall/Makefile.devel
+--- libffcall-2.4/vacall/Makefile.devel        2021-06-12 23:11:57.000000000 
+0800
++++ libffcall-2.4-20220508/vacall/Makefile.devel       2022-05-08 
23:28:37.266397740 +0800
+@@ -267,15 +267,21 @@
+ vacall-riscv32-ilp32d-linux.s : vacall-riscv32.c vacall-internal.h vacall.h 
$(THISFILE)
+       $(CROSS_TOOL) riscv32-linux gcc-7.3.0 $(GCCFLAGS) -D__riscv32__ -S 
vacall-riscv32.c -o vacall-riscv32-ilp32d-linux.s
+ 
+-vacall-riscv32-ilp32d-macro.S : vacall-riscv32-ilp32d-linux.s 
../common/asm-riscv.sh ../common/noexecstack.h $(THISFILE)
+-      (../common/asm-riscv.sh < vacall-riscv32-ilp32d-linux.s ; cat 
../common/noexecstack.h) > vacall-riscv32-ilp32d-macro.S
++vacall-riscv32-ilp32d-linux-pic.s : vacall-riscv32.c vacall-internal.h 
vacall.h $(THISFILE)
++      $(CROSS_TOOL) riscv32-linux gcc-7.3.0 $(GCCFLAGS) -fPIC -D__riscv32__ 
-S vacall-riscv32.c -o vacall-riscv32-ilp32d-linux-pic.s
++
++vacall-riscv32-ilp32d-macro.S : vacall-riscv32-ilp32d-linux.s 
vacall-riscv32-ilp32d-linux-pic.s ../common/asm-riscv.sh 
../common/noexecstack.h $(THISFILE)
++      (echo '#ifdef __PIC__' ; ../common/asm-riscv.sh < 
vacall-riscv32-ilp32d-linux-pic.s ; echo '#else' ; ../common/asm-riscv.sh < 
vacall-riscv32-ilp32d-linux.s ; echo '#endif' ; cat ../common/noexecstack.h) > 
vacall-riscv32-ilp32d-macro.S
+ 
+ 
+ vacall-riscv64-lp64d-linux.s : vacall-riscv64.c vacall-internal.h vacall.h 
$(THISFILE)
+       $(CROSS_TOOL) riscv64-linux gcc-7.3.0 $(GCCFLAGS) -D__riscv64__ -S 
vacall-riscv64.c -o vacall-riscv64-lp64d-linux.s
+ 
+-vacall-riscv64-lp64d-macro.S : vacall-riscv64-lp64d-linux.s 
../common/asm-riscv.sh ../common/noexecstack.h $(THISFILE)
+-      (../common/asm-riscv.sh < vacall-riscv64-lp64d-linux.s ; cat 
../common/noexecstack.h) > vacall-riscv64-lp64d-macro.S
++vacall-riscv64-lp64d-linux-pic.s : vacall-riscv64.c vacall-internal.h 
vacall.h $(THISFILE)
++      $(CROSS_TOOL) riscv64-linux gcc-7.3.0 $(GCCFLAGS) -fPIC -D__riscv64__ 
-S vacall-riscv64.c -o vacall-riscv64-lp64d-linux-pic.s
++
++vacall-riscv64-lp64d-macro.S : vacall-riscv64-lp64d-linux.s 
vacall-riscv64-lp64d-linux-pic.s ../common/asm-riscv.sh ../common/noexecstack.h 
$(THISFILE)
++      (echo '#ifdef __PIC__' ; ../common/asm-riscv.sh < 
vacall-riscv64-lp64d-linux-pic.s ; echo '#else' ; ../common/asm-riscv.sh < 
vacall-riscv64-lp64d-linux.s ; echo '#endif' ; cat ../common/noexecstack.h) > 
vacall-riscv64-lp64d-macro.S
+ 
+ 
+ # --------------- Rules for debugging test failures ---------------
+diff -ruN libffcall-2.4/vacall/Makefile.in 
libffcall-2.4-20220508/vacall/Makefile.in
+--- libffcall-2.4/vacall/Makefile.in   2021-06-12 23:11:57.000000000 +0800
++++ libffcall-2.4-20220508/vacall/Makefile.in  2022-05-08 23:29:18.347087098 
+0800
+@@ -354,8 +354,8 @@
+   vacall-powerpc-aix.s \
+   vacall-powerpc-linux.s vacall-powerpc-linux-macro.S vacall-powerpc-macos.s 
vacall-powerpc-sysv4-macro.S \
+   vacall-powerpc64.c vacall-powerpc64-aix.s vacall-powerpc64-linux.S 
vacall-powerpc64-elfv2-linux.S \
+-  vacall-riscv32.c vacall-riscv32-ilp32d-linux.s 
vacall-riscv32-ilp32d-macro.S \
+-  vacall-riscv64.c vacall-riscv64-lp64d-linux.s vacall-riscv64-lp64d-macro.S \
++  vacall-riscv32.c vacall-riscv32-ilp32d-linux.s 
vacall-riscv32-ilp32d-linux-pic.s vacall-riscv32-ilp32d-macro.S \
++  vacall-riscv64.c vacall-riscv64-lp64d-linux.s 
vacall-riscv64-lp64d-linux-pic.s vacall-riscv64-lp64d-macro.S \
+   vacall-s390.c vacall-s390-linux.s vacall-s390-macro.S \
+   vacall-s390x.c vacall-s390x-linux.s vacall-s390x-macro.S \
+   vacall-sparc.c vacall-sparc-linux.s vacall-sparc-linux-pic.s 
vacall-sparc-macro.S \
+diff -ruN libffcall-2.4/vacall/vacall-riscv32-ilp32d-linux-pic.s 
libffcall-2.4-20220508/vacall/vacall-riscv32-ilp32d-linux-pic.s
+--- libffcall-2.4/vacall/vacall-riscv32-ilp32d-linux-pic.s     1970-01-01 
08:00:00.000000000 +0800
++++ libffcall-2.4-20220508/vacall/vacall-riscv32-ilp32d-linux-pic.s    
2022-05-08 19:41:49.000000000 +0800
+@@ -0,0 +1,163 @@
++      .file   "vacall-riscv32.c"
++      .option pic
++      .text
++      .align  1
++      .globl  vacall_receiver
++      .type   vacall_receiver, @function
++vacall_receiver:
++      add     sp,sp,-208
++      sw      ra,188(sp)
++      sw      s0,184(sp)
++      add     s0,sp,192
++      la      t1,vacall_function
++      lw      t1,0(t1)
++      sw      a0,-148(s0)
++      add     a0,s0,16
++      sw      a7,12(s0)
++      sw      a1,-144(s0)
++      sw      a2,-140(s0)
++      sw      a3,-136(s0)
++      sw      a4,-132(s0)
++      sw      a5,-128(s0)
++      sw      a6,-124(s0)
++      sw      a7,-120(s0)
++      fsw     fa0,-112(s0)
++      fsw     fa1,-108(s0)
++      fsw     fa2,-104(s0)
++      fsw     fa3,-100(s0)
++      fsw     fa4,-96(s0)
++      fsw     fa5,-92(s0)
++      fsw     fa6,-88(s0)
++      fsw     fa7,-84(s0)
++      fsd     fa0,-80(s0)
++      fsd     fa1,-72(s0)
++      fsd     fa2,-64(s0)
++      fsd     fa3,-56(s0)
++      fsd     fa4,-48(s0)
++      fsd     fa5,-40(s0)
++      fsd     fa6,-32(s0)
++      fsd     fa7,-24(s0)
++      sw      a0,-168(s0)
++      sw      zero,-184(s0)
++      sw      zero,-164(s0)
++      sw      zero,-160(s0)
++      add     a0,s0,-184
++      sw      zero,-152(s0)
++      sw      zero,-116(s0)
++      jalr    t1
++      lw      t1,-160(s0)
++      beqz    t1,.L1
++      li      t3,1
++      beq     t1,t3,.L22
++      li      t3,2
++      beq     t1,t3,.L25
++      li      t3,3
++      beq     t1,t3,.L22
++      li      t3,4
++      beq     t1,t3,.L26
++      li      t3,5
++      beq     t1,t3,.L27
++      li      t3,6
++      beq     t1,t3,.L23
++      li      t3,7
++      beq     t1,t3,.L23
++      li      t3,8
++      beq     t1,t3,.L23
++      li      t3,9
++      beq     t1,t3,.L23
++      add     t3,t1,-10
++      li      t4,1
++      bleu    t3,t4,.L28
++      li      t3,12
++      beq     t1,t3,.L29
++      li      t3,13
++      beq     t1,t3,.L30
++      li      t3,14
++      beq     t1,t3,.L23
++      li      t3,15
++      bne     t1,t3,.L1
++      lw      t1,-184(s0)
++      and     t1,t1,2
++      beqz    t1,.L1
++      lw      t3,-156(s0)
++      li      t1,7
++      add     t5,t3,-1
++      bgtu    t5,t1,.L1
++      lw      t1,-164(s0)
++      lbu     t5,0(t1)
++      mv      a0,t5
++      beq     t3,t4,.L1
++      lbu     t4,1(t1)
++      li      t6,2
++      sll     t4,t4,8
++      or      t5,t4,t5
++      mv      a0,t5
++      beq     t3,t6,.L1
++      lbu     t4,2(t1)
++      li      t6,3
++      sll     t4,t4,16
++      or      t4,t4,t5
++      mv      a0,t4
++      beq     t3,t6,.L1
++      lbu     a0,3(t1)
++      li      t5,4
++      sll     a0,a0,24
++      or      a0,a0,t4
++      beq     t3,t5,.L1
++      lbu     t5,4(t1)
++      li      t4,5
++      mv      a1,t5
++      beq     t3,t4,.L1
++      lbu     t4,5(t1)
++      li      t6,6
++      sll     t4,t4,8
++      or      t5,t4,t5
++      mv      a1,t5
++      beq     t3,t6,.L1
++      lbu     t4,6(t1)
++      li      t6,8
++      sll     t4,t4,16
++      or      t4,t4,t5
++      mv      a1,t4
++      bne     t3,t6,.L1
++      lbu     a1,7(t1)
++      sll     a1,a1,24
++      or      a1,a1,t4
++.L1:
++      lw      ra,188(sp)
++      lw      s0,184(sp)
++      add     sp,sp,208
++      jr      ra
++.L22:
++      lbu     a0,-176(s0)
++      lw      ra,188(sp)
++      lw      s0,184(sp)
++      add     sp,sp,208
++      jr      ra
++.L23:
++      lw      a0,-176(s0)
++      j       .L1
++.L25:
++      lb      a0,-176(s0)
++      lw      ra,188(sp)
++      lw      s0,184(sp)
++      add     sp,sp,208
++      jr      ra
++.L26:
++      lh      a0,-176(s0)
++      j       .L1
++.L27:
++      lhu     a0,-176(s0)
++      j       .L1
++.L28:
++      lw      a0,-176(s0)
++      lw      a1,-172(s0)
++      j       .L1
++.L29:
++      flw     fa0,-176(s0)
++      j       .L1
++.L30:
++      fld     fa0,-176(s0)
++      j       .L1
++      .size   vacall_receiver, .-vacall_receiver
++      .ident  "GCC: (GNU) 7.3.0"
+diff -ruN libffcall-2.4/vacall/vacall-riscv32-ilp32d-macro.S 
libffcall-2.4-20220508/vacall/vacall-riscv32-ilp32d-macro.S
+--- libffcall-2.4/vacall/vacall-riscv32-ilp32d-macro.S 2021-06-13 
02:30:16.000000000 +0800
++++ libffcall-2.4-20220508/vacall/vacall-riscv32-ilp32d-macro.S        
2022-05-08 19:41:49.000000000 +0800
+@@ -1,3 +1,167 @@
++#ifdef __PIC__
++      .file   "vacall-riscv32.c"
++      .option pic
++      .text
++      .align  1
++      .globl  vacall_receiver
++      .type   vacall_receiver, @function
++vacall_receiver:
++      add     sp,sp,-208
++      sw      ra,188(sp)
++      sw      s0,184(sp)
++      add     s0,sp,192
++      la      t1,vacall_function
++      lw      t1,0(t1)
++      sw      a0,-148(s0)
++      add     a0,s0,16
++      sw      a7,12(s0)
++      sw      a1,-144(s0)
++      sw      a2,-140(s0)
++      sw      a3,-136(s0)
++      sw      a4,-132(s0)
++      sw      a5,-128(s0)
++      sw      a6,-124(s0)
++      sw      a7,-120(s0)
++      fsw     fa0,-112(s0)
++      fsw     fa1,-108(s0)
++      fsw     fa2,-104(s0)
++      fsw     fa3,-100(s0)
++      fsw     fa4,-96(s0)
++      fsw     fa5,-92(s0)
++      fsw     fa6,-88(s0)
++      fsw     fa7,-84(s0)
++      fsd     fa0,-80(s0)
++      fsd     fa1,-72(s0)
++      fsd     fa2,-64(s0)
++      fsd     fa3,-56(s0)
++      fsd     fa4,-48(s0)
++      fsd     fa5,-40(s0)
++      fsd     fa6,-32(s0)
++      fsd     fa7,-24(s0)
++      sw      a0,-168(s0)
++      sw      zero,-184(s0)
++      sw      zero,-164(s0)
++      sw      zero,-160(s0)
++      add     a0,s0,-184
++      sw      zero,-152(s0)
++      sw      zero,-116(s0)
++      jalr    t1
++      lw      t1,-160(s0)
++      beqz    t1,.L1
++      li      t3,1
++      beq     t1,t3,.L22
++      li      t3,2
++      beq     t1,t3,.L25
++      li      t3,3
++      beq     t1,t3,.L22
++      li      t3,4
++      beq     t1,t3,.L26
++      li      t3,5
++      beq     t1,t3,.L27
++      li      t3,6
++      beq     t1,t3,.L23
++      li      t3,7
++      beq     t1,t3,.L23
++      li      t3,8
++      beq     t1,t3,.L23
++      li      t3,9
++      beq     t1,t3,.L23
++      add     t3,t1,-10
++      li      t4,1
++      bleu    t3,t4,.L28
++      li      t3,12
++      beq     t1,t3,.L29
++      li      t3,13
++      beq     t1,t3,.L30
++      li      t3,14
++      beq     t1,t3,.L23
++      li      t3,15
++      bne     t1,t3,.L1
++      lw      t1,-184(s0)
++      and     t1,t1,2
++      beqz    t1,.L1
++      lw      t3,-156(s0)
++      li      t1,7
++      add     t5,t3,-1
++      bgtu    t5,t1,.L1
++      lw      t1,-164(s0)
++      lbu     t5,0(t1)
++      mv      a0,t5
++      beq     t3,t4,.L1
++      lbu     t4,1(t1)
++      li      t6,2
++      sll     t4,t4,8
++      or      t5,t4,t5
++      mv      a0,t5
++      beq     t3,t6,.L1
++      lbu     t4,2(t1)
++      li      t6,3
++      sll     t4,t4,16
++      or      t4,t4,t5
++      mv      a0,t4
++      beq     t3,t6,.L1
++      lbu     a0,3(t1)
++      li      t5,4
++      sll     a0,a0,24
++      or      a0,a0,t4
++      beq     t3,t5,.L1
++      lbu     t5,4(t1)
++      li      t4,5
++      mv      a1,t5
++      beq     t3,t4,.L1
++      lbu     t4,5(t1)
++      li      t6,6
++      sll     t4,t4,8
++      or      t5,t4,t5
++      mv      a1,t5
++      beq     t3,t6,.L1
++      lbu     t4,6(t1)
++      li      t6,8
++      sll     t4,t4,16
++      or      t4,t4,t5
++      mv      a1,t4
++      bne     t3,t6,.L1
++      lbu     a1,7(t1)
++      sll     a1,a1,24
++      or      a1,a1,t4
++.L1:
++      lw      ra,188(sp)
++      lw      s0,184(sp)
++      add     sp,sp,208
++      jr      ra
++.L22:
++      lbu     a0,-176(s0)
++      lw      ra,188(sp)
++      lw      s0,184(sp)
++      add     sp,sp,208
++      jr      ra
++.L23:
++      lw      a0,-176(s0)
++      j       .L1
++.L25:
++      lb      a0,-176(s0)
++      lw      ra,188(sp)
++      lw      s0,184(sp)
++      add     sp,sp,208
++      jr      ra
++.L26:
++      lh      a0,-176(s0)
++      j       .L1
++.L27:
++      lhu     a0,-176(s0)
++      j       .L1
++.L28:
++      lw      a0,-176(s0)
++      lw      a1,-172(s0)
++      j       .L1
++.L29:
++      flw     fa0,-176(s0)
++      j       .L1
++.L30:
++      fld     fa0,-176(s0)
++      j       .L1
++      .size   vacall_receiver, .-vacall_receiver
++#else
+       .file   "vacall-riscv32.c"
+       .option nopic
+       .text
+@@ -160,6 +324,7 @@
+       fld     fa0,-176(s0)
+       j       .L1
+       .size   vacall_receiver, .-vacall_receiver
++#endif
+ #if defined __linux__ || defined __FreeBSD__ || defined __FreeBSD_kernel__ || 
defined __DragonFly__
+       .section .note.GNU-stack,"",@progbits
+ #endif
+diff -ruN libffcall-2.4/vacall/vacall-riscv64-lp64d-linux-pic.s 
libffcall-2.4-20220508/vacall/vacall-riscv64-lp64d-linux-pic.s
+--- libffcall-2.4/vacall/vacall-riscv64-lp64d-linux-pic.s      1970-01-01 
08:00:00.000000000 +0800
++++ libffcall-2.4-20220508/vacall/vacall-riscv64-lp64d-linux-pic.s     
2022-05-08 19:41:49.000000000 +0800
+@@ -0,0 +1,190 @@
++      .file   "vacall-riscv64.c"
++      .option pic
++      .text
++      .align  1
++      .globl  vacall_receiver
++      .type   vacall_receiver, @function
++vacall_receiver:
++      add     sp,sp,-288
++      sd      ra,264(sp)
++      sd      s0,256(sp)
++      sd      s1,248(sp)
++      add     s0,sp,272
++      la      t1,vacall_function
++      ld      t1,0(t1)
++      sd      a0,-200(s0)
++      add     a0,s0,16
++      sd      a7,8(s0)
++      sd      a1,-192(s0)
++      sd      a2,-184(s0)
++      sd      a3,-176(s0)
++      sd      a4,-168(s0)
++      sd      a5,-160(s0)
++      sd      a6,-152(s0)
++      sd      a7,-144(s0)
++      fsw     fa0,-132(s0)
++      fsw     fa1,-128(s0)
++      fsw     fa2,-124(s0)
++      fsw     fa3,-120(s0)
++      fsw     fa4,-116(s0)
++      fsw     fa5,-112(s0)
++      fsw     fa6,-108(s0)
++      fsw     fa7,-104(s0)
++      fsd     fa0,-96(s0)
++      fsd     fa1,-88(s0)
++      fsd     fa2,-80(s0)
++      fsd     fa3,-72(s0)
++      fsd     fa4,-64(s0)
++      fsd     fa5,-56(s0)
++      fsd     fa6,-48(s0)
++      fsd     fa7,-40(s0)
++      sd      a0,-240(s0)
++      sw      zero,-264(s0)
++      sd      zero,-232(s0)
++      add     a0,s0,-264
++      sw      zero,-224(s0)
++      sw      zero,-208(s0)
++      sw      zero,-136(s0)
++      jalr    t1
++      lw      t1,-224(s0)
++      beqz    t1,.L1
++      li      t3,1
++      beq     t1,t3,.L25
++      li      t3,2
++      beq     t1,t3,.L29
++      li      t3,3
++      beq     t1,t3,.L25
++      li      t3,4
++      beq     t1,t3,.L30
++      li      t3,5
++      beq     t1,t3,.L31
++      li      t3,6
++      beq     t1,t3,.L32
++      li      t3,7
++      beq     t1,t3,.L33
++      and     t3,t1,-3
++      li      t4,8
++      beq     t3,t4,.L27
++      li      t4,9
++      beq     t3,t4,.L27
++      li      t3,12
++      beq     t1,t3,.L34
++      li      t3,13
++      beq     t1,t3,.L35
++      li      t3,14
++      beq     t1,t3,.L27
++      li      t3,15
++      bne     t1,t3,.L1
++      lw      t3,-264(s0)
++      and     t3,t3,1024
++      beqz    t3,.L1
++      ld      t0,-216(s0)
++      add     t3,t0,-1
++      bgtu    t3,t1,.L1
++      ld      t5,-232(s0)
++      li      s1,8
++      and     t6,t5,7
++      add     t2,t0,t6
++      and     t5,t5,-8
++      sext.w  t6,t6
++      sllw    t1,t2,3
++      ld      t4,0(t5)
++      sll     t3,t6,3
++      bgtu    t0,s1,.L15
++      bgtu    t2,s1,.L16
++      addw    t1,t1,-1
++      li      a0,2
++      sll     a0,a0,t1
++      add     a0,a0,-1
++      and     a0,a0,t4
++      sra     a0,a0,t3
++.L1:
++      ld      ra,264(sp)
++      ld      s0,256(sp)
++      ld      s1,248(sp)
++      add     sp,sp,288
++      jr      ra
++.L25:
++      lbu     a0,-256(s0)
++      ld      ra,264(sp)
++      ld      s0,256(sp)
++      ld      s1,248(sp)
++      add     sp,sp,288
++      jr      ra
++.L29:
++      lb      a0,-256(s0)
++      ld      ra,264(sp)
++      ld      s0,256(sp)
++      ld      s1,248(sp)
++      add     sp,sp,288
++      jr      ra
++.L30:
++      lh      a0,-256(s0)
++      j       .L1
++.L33:
++      lwu     a0,-256(s0)
++      j       .L1
++.L31:
++      lhu     a0,-256(s0)
++      j       .L1
++.L27:
++      ld      a0,-256(s0)
++      j       .L1
++.L32:
++      lw      a0,-256(s0)
++      j       .L1
++.L34:
++      flw     fa0,-256(s0)
++      j       .L1
++.L35:
++      fld     fa0,-256(s0)
++      j       .L1
++.L15:
++      li      s1,16
++      sra     t4,t4,t3
++      ld      t0,8(t5)
++      bleu    t2,s1,.L36
++      li      a1,-8
++      mulw    t6,a1,t6
++      addw    t1,t1,-129
++      ld      a0,16(t5)
++      li      a1,2
++      sll     a1,a1,t1
++      add     a1,a1,-1
++      and     a1,a1,a0
++      sra     t3,t0,t3
++      addw    t1,t6,64
++      sll     a0,t0,t1
++      sll     a1,a1,t1
++      or      a0,a0,t4
++      or      a1,a1,t3
++      j       .L1
++.L16:
++      li      a0,-8
++      mulw    t6,a0,t6
++      addw    t1,t1,-65
++      ld      t5,8(t5)
++      li      a0,2
++      sll     a0,a0,t1
++      add     a0,a0,-1
++      and     a0,a0,t5
++      sra     t4,t4,t3
++      sll     a0,a0,t6
++      or      a0,a0,t4
++      j       .L1
++.L36:
++      li      a1,-4
++      mulw    t6,a1,t6
++      addw    t1,t1,-65
++      li      a0,2
++      sll     a0,a0,t1
++      add     a0,a0,-1
++      and     a0,a0,t0
++      sra     a1,a0,t3
++      addw    t1,t6,32
++      sll     a0,a0,t1
++      sll     a0,a0,t1
++      or      a0,a0,t4
++      j       .L1
++      .size   vacall_receiver, .-vacall_receiver
++      .ident  "GCC: (GNU) 7.3.0"
+diff -ruN libffcall-2.4/vacall/vacall-riscv64-lp64d-macro.S 
libffcall-2.4-20220508/vacall/vacall-riscv64-lp64d-macro.S
+--- libffcall-2.4/vacall/vacall-riscv64-lp64d-macro.S  2021-06-13 
02:30:16.000000000 +0800
++++ libffcall-2.4-20220508/vacall/vacall-riscv64-lp64d-macro.S 2022-05-08 
19:41:49.000000000 +0800
+@@ -1,3 +1,194 @@
++#ifdef __PIC__
++      .file   "vacall-riscv64.c"
++      .option pic
++      .text
++      .align  1
++      .globl  vacall_receiver
++      .type   vacall_receiver, @function
++vacall_receiver:
++      add     sp,sp,-288
++      sd      ra,264(sp)
++      sd      s0,256(sp)
++      sd      s1,248(sp)
++      add     s0,sp,272
++      la      t1,vacall_function
++      ld      t1,0(t1)
++      sd      a0,-200(s0)
++      add     a0,s0,16
++      sd      a7,8(s0)
++      sd      a1,-192(s0)
++      sd      a2,-184(s0)
++      sd      a3,-176(s0)
++      sd      a4,-168(s0)
++      sd      a5,-160(s0)
++      sd      a6,-152(s0)
++      sd      a7,-144(s0)
++      fsw     fa0,-132(s0)
++      fsw     fa1,-128(s0)
++      fsw     fa2,-124(s0)
++      fsw     fa3,-120(s0)
++      fsw     fa4,-116(s0)
++      fsw     fa5,-112(s0)
++      fsw     fa6,-108(s0)
++      fsw     fa7,-104(s0)
++      fsd     fa0,-96(s0)
++      fsd     fa1,-88(s0)
++      fsd     fa2,-80(s0)
++      fsd     fa3,-72(s0)
++      fsd     fa4,-64(s0)
++      fsd     fa5,-56(s0)
++      fsd     fa6,-48(s0)
++      fsd     fa7,-40(s0)
++      sd      a0,-240(s0)
++      sw      zero,-264(s0)
++      sd      zero,-232(s0)
++      add     a0,s0,-264
++      sw      zero,-224(s0)
++      sw      zero,-208(s0)
++      sw      zero,-136(s0)
++      jalr    t1
++      lw      t1,-224(s0)
++      beqz    t1,.L1
++      li      t3,1
++      beq     t1,t3,.L25
++      li      t3,2
++      beq     t1,t3,.L29
++      li      t3,3
++      beq     t1,t3,.L25
++      li      t3,4
++      beq     t1,t3,.L30
++      li      t3,5
++      beq     t1,t3,.L31
++      li      t3,6
++      beq     t1,t3,.L32
++      li      t3,7
++      beq     t1,t3,.L33
++      and     t3,t1,-3
++      li      t4,8
++      beq     t3,t4,.L27
++      li      t4,9
++      beq     t3,t4,.L27
++      li      t3,12
++      beq     t1,t3,.L34
++      li      t3,13
++      beq     t1,t3,.L35
++      li      t3,14
++      beq     t1,t3,.L27
++      li      t3,15
++      bne     t1,t3,.L1
++      lw      t3,-264(s0)
++      and     t3,t3,1024
++      beqz    t3,.L1
++      ld      t0,-216(s0)
++      add     t3,t0,-1
++      bgtu    t3,t1,.L1
++      ld      t5,-232(s0)
++      li      s1,8
++      and     t6,t5,7
++      add     t2,t0,t6
++      and     t5,t5,-8
++      sext.w  t6,t6
++      sllw    t1,t2,3
++      ld      t4,0(t5)
++      sll     t3,t6,3
++      bgtu    t0,s1,.L15
++      bgtu    t2,s1,.L16
++      addw    t1,t1,-1
++      li      a0,2
++      sll     a0,a0,t1
++      add     a0,a0,-1
++      and     a0,a0,t4
++      sra     a0,a0,t3
++.L1:
++      ld      ra,264(sp)
++      ld      s0,256(sp)
++      ld      s1,248(sp)
++      add     sp,sp,288
++      jr      ra
++.L25:
++      lbu     a0,-256(s0)
++      ld      ra,264(sp)
++      ld      s0,256(sp)
++      ld      s1,248(sp)
++      add     sp,sp,288
++      jr      ra
++.L29:
++      lb      a0,-256(s0)
++      ld      ra,264(sp)
++      ld      s0,256(sp)
++      ld      s1,248(sp)
++      add     sp,sp,288
++      jr      ra
++.L30:
++      lh      a0,-256(s0)
++      j       .L1
++.L33:
++      lwu     a0,-256(s0)
++      j       .L1
++.L31:
++      lhu     a0,-256(s0)
++      j       .L1
++.L27:
++      ld      a0,-256(s0)
++      j       .L1
++.L32:
++      lw      a0,-256(s0)
++      j       .L1
++.L34:
++      flw     fa0,-256(s0)
++      j       .L1
++.L35:
++      fld     fa0,-256(s0)
++      j       .L1
++.L15:
++      li      s1,16
++      sra     t4,t4,t3
++      ld      t0,8(t5)
++      bleu    t2,s1,.L36
++      li      a1,-8
++      mulw    t6,a1,t6
++      addw    t1,t1,-129
++      ld      a0,16(t5)
++      li      a1,2
++      sll     a1,a1,t1
++      add     a1,a1,-1
++      and     a1,a1,a0
++      sra     t3,t0,t3
++      addw    t1,t6,64
++      sll     a0,t0,t1
++      sll     a1,a1,t1
++      or      a0,a0,t4
++      or      a1,a1,t3
++      j       .L1
++.L16:
++      li      a0,-8
++      mulw    t6,a0,t6
++      addw    t1,t1,-65
++      ld      t5,8(t5)
++      li      a0,2
++      sll     a0,a0,t1
++      add     a0,a0,-1
++      and     a0,a0,t5
++      sra     t4,t4,t3
++      sll     a0,a0,t6
++      or      a0,a0,t4
++      j       .L1
++.L36:
++      li      a1,-4
++      mulw    t6,a1,t6
++      addw    t1,t1,-65
++      li      a0,2
++      sll     a0,a0,t1
++      add     a0,a0,-1
++      and     a0,a0,t0
++      sra     a1,a0,t3
++      addw    t1,t6,32
++      sll     a0,a0,t1
++      sll     a0,a0,t1
++      or      a0,a0,t4
++      j       .L1
++      .size   vacall_receiver, .-vacall_receiver
++#else
+       .file   "vacall-riscv64.c"
+       .option nopic
+       .text
+@@ -187,6 +378,7 @@
+       or      a0,a0,t4
+       j       .L1
+       .size   vacall_receiver, .-vacall_receiver
++#endif
+ #if defined __linux__ || defined __FreeBSD__ || defined __FreeBSD_kernel__ || 
defined __DragonFly__
+       .section .note.GNU-stack,"",@progbits
+ #endif

Reply via email to