tags +patch
thanks

The appended patch fixes the problem -- in theory. A build on mipsel
still fails because:

 - The alignment of small arguments is broken in gcc-3.3 (it adjusts
   them big-endian style, i.e. to the right); gcc-3.4 works.
 - The floating-point argument passing convention for o32 is slightly
   broken with all gcc-3.x; gcc-2 and gcc-4 are OK.

This can be papered over by ignoring the testsuite results, as was
done before this patch; the library will still work in most cases
relevant for gnustep. If the results are _not_ ignored, the mips
build will fail as well because of the broken FP argument passing,
and it may also fail for other architectures that have a so-far
ignored testsuite problem -- I haven't checked those.
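
As an illustration (not part of the patch; the function names below
are made up), the o32 rule that the __AV_FLOAT_*/__AV_DOUBLE_* flags
in this patch keep track of is that only *leading* floating-point
arguments travel in $f12/$f14; once a non-FP argument has been seen,
everything else goes into integer registers or onto the stack:

  #include <stdio.h>

  /* Hypothetical functions; the register notes follow the o32 ABI. */
  static double both_fp(double a, double b)  { return a + b; } /* a->$f12, b->$f14           */
  static double fp_then_int(double a, int b) { return a + b; } /* a->$f12, b->$a2            */
  static double int_then_fp(int a, double b) { return a + b; } /* a->$a0, b->$a2/$a3, no FPR */

  int main(void)
  {
      printf("%g %g %g\n",
             both_fp(1.0, 2.0), fp_then_int(1.0, 2), int_then_fp(1, 2.0));
      return 0;
  }

This is roughly the convention the patched __builtin_avcall() below
has to reproduce by hand when it loads $f12/$f14 for the leading
arguments.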


Thiemo


diff -uprN ffcall-1.10.orig/debian/rules ffcall-1.10/debian/rules
--- ffcall-1.10.orig/debian/rules       2005-03-23 19:25:27.000000000 +0000
+++ ffcall-1.10/debian/rules    2005-03-20 19:13:32.000000000 +0000
@@ -28,7 +28,7 @@ configure-stamp:
                --mandir=\$${prefix}/share/man \
                --infodir=\$${prefix}/usr/share/info \
                --enable-shared \
-               $(DEB_HOST_GNU_TYPE)
+               --build=$(DEB_HOST_GNU_TYPE)
        touch configure-stamp
 
 build: configure-stamp build-stamp
diff -uprN ffcall-1.10.orig/ffcall/Makefile.devel ffcall-1.10/ffcall/Makefile.devel
--- ffcall-1.10.orig/ffcall/Makefile.devel      2004-01-26 16:16:04.000000000 +0000
+++ ffcall-1.10/ffcall/Makefile.devel   2005-03-21 02:40:22.000000000 +0000
@@ -9,30 +9,28 @@ all : autoconf/aclocal.m4 configures avc
 CONFIGURES = configure avcall/configure vacall/configure trampoline/configure callback/configure callback/vacall_r/configure callback/trampoline_r/configure
 CONFIGURES_IN = configure.in avcall/configure.in vacall/configure.in trampoline/configure.in callback/configure.in callback/vacall_r/configure.in callback/trampoline_r/configure.in
 
-CLISP_DIR = ..
-
-m4/general.m4 : $(CLISP_DIR)/src/m4/general.m4 ; cp -p $< [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
-m4/proto.m4 : $(CLISP_DIR)/src/m4/proto.m4 ; cp -p $< [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
-m4/cc-gcc.m4 : $(CLISP_DIR)/src/m4/cc-gcc.m4 ; cp -p $< [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
-m4/as-underscore.m4 : $(CLISP_DIR)/src/m4/as-underscore.m4 ; cp -p $< [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
-m4/ranlib.m4 : $(CLISP_DIR)/src/m4/ranlib.m4 ; cp -p $< [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
-m4/install.m4 : $(CLISP_DIR)/src/m4/install.m4 ; cp -p $< [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
-m4/cp.m4 : $(CLISP_DIR)/src/m4/cp.m4 ; cp -p $< [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
-m4/ln.m4 : $(CLISP_DIR)/src/m4/ln.m4 ; cp -p $< [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
-m4/ffcall-pccstruct.m4 : $(CLISP_DIR)/src/m4/ffcall-pccstruct.m4 ; cp -p $< [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
-m4/ffcall-smallstruct.m4 : $(CLISP_DIR)/src/m4/ffcall-smallstruct.m4 ; cp -p $< [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
-m4/ffcall-ireg.m4 : $(CLISP_DIR)/src/m4/ffcall-ireg.m4 ; cp -p $< [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
-m4/longlong.m4 : $(CLISP_DIR)/src/m4/longlong.m4 ; cp -p $< [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
-m4/cc-void.m4 : $(CLISP_DIR)/src/m4/cc-void.m4 ; cp -p $< [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
-m4/stdc-headers.m4 : $(CLISP_DIR)/src/m4/stdc-headers.m4 ; cp -p $< [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
-m4/getpagesize.m4 : $(CLISP_DIR)/src/m4/getpagesize.m4 ; cp -p $< [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
-m4/mach-vm.m4 : $(CLISP_DIR)/src/m4/mach-vm.m4 ; cp -p $< [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
-m4/openflags.m4 : $(CLISP_DIR)/src/m4/openflags.m4 ; cp -p $< [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
-m4/mmap.m4 : $(CLISP_DIR)/src/m4/mmap.m4 ; cp -p $< [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
-m4/mprotect.m4 : $(CLISP_DIR)/src/m4/mprotect.m4 ; cp -p $< [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
-m4/shm.m4 : $(CLISP_DIR)/src/m4/shm.m4 ; cp -p $< [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
-m4/ffcall-codeexec.m4 : $(CLISP_DIR)/src/m4/ffcall-codeexec.m4 ; cp -p $< [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
-m4/libtool.m4 : $(CLISP_DIR)/src/m4/libtool.m4
+m4/general.m4 : cp -p $< [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
+m4/proto.m4 : cp -p $< [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
+m4/cc-gcc.m4 : cp -p $< [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
+m4/as-underscore.m4 : cp -p $< [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
+m4/ranlib.m4 : cp -p $< [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
+m4/install.m4 : cp -p $< [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
+m4/cp.m4 : cp -p $< [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
+m4/ln.m4 : cp -p $< [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
+m4/ffcall-pccstruct.m4 : cp -p $< [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
+m4/ffcall-smallstruct.m4 : cp -p $< [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
+m4/ffcall-ireg.m4 : cp -p $< [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
+m4/longlong.m4 : cp -p $< [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
+m4/cc-void.m4 : cp -p $< [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
+m4/stdc-headers.m4 : cp -p $< [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
+m4/getpagesize.m4 : cp -p $< [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
+m4/mach-vm.m4 : cp -p $< [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
+m4/openflags.m4 : cp -p $< [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
+m4/mmap.m4 : cp -p $< [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
+m4/mprotect.m4 : cp -p $< [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
+m4/shm.m4 : cp -p $< [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
+m4/ffcall-codeexec.m4 : cp -p $< [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
+m4/libtool.m4 :
        sed -e 's,AC_CANONICAL_HOST,CL_CANONICAL_HOST,g' -e 's,AC_PROG_RANLIB,CL_PROG_RANLIB,g' -e 's,AC_PROG_LN_S,CL_PROG_LN_S,g' < $< > [EMAIL PROTECTED] && mv [EMAIL PROTECTED] $@
 
 autoconf/aclocal.m4 : m4/general.m4 m4/proto.m4 m4/cc-gcc.m4 m4/as-underscore.m4 m4/ranlib.m4 m4/install.m4 m4/cp.m4 m4/ln.m4 m4/ffcall-pccstruct.m4 m4/ffcall-smallstruct.m4 m4/ffcall-ireg.m4 m4/longlong.m4 m4/cc-void.m4 m4/stdc-headers.m4 m4/getpagesize.m4 m4/mach-vm.m4 m4/openflags.m4 m4/mmap.m4 m4/mprotect.m4 m4/shm.m4 m4/ffcall-codeexec.m4 m4/libtool.m4
diff -uprN ffcall-1.10.orig/ffcall/Makefile.in ffcall-1.10/ffcall/Makefile.in
--- ffcall-1.10.orig/ffcall/Makefile.in 2001-02-20 19:19:32.000000000 +0000
+++ ffcall-1.10/ffcall/Makefile.in      2005-03-21 02:38:28.000000000 +0000
@@ -19,35 +19,35 @@ RM = rm -f
 SHELL = /bin/sh
 
 all : force
-       cd @subdir@; $(MAKE) all
+       cd @subdir@ && $(MAKE) all
 
 install : force
-       cd @subdir@; $(MAKE) install
+       cd @subdir@ && $(MAKE) install
 
 installdirs : force
-       cd @subdir@; $(MAKE) installdirs
+       cd @subdir@ && $(MAKE) installdirs
 
 uninstall : force
-       cd @subdir@; $(MAKE) uninstall
+       cd @subdir@ && $(MAKE) uninstall
 
 check : force
-       cd @subdir@; $(MAKE) check
+       cd @subdir@ && $(MAKE) check
 
 extracheck : force
-       cd @subdir@; $(MAKE) extracheck
+       cd @subdir@ && $(MAKE) extracheck
 
 mostlyclean : force
-       cd @subdir@; $(MAKE) mostlyclean
+       cd @subdir@ && $(MAKE) mostlyclean
 
 clean : force
-       cd @subdir@; $(MAKE) clean
+       cd @subdir@ && $(MAKE) clean
 
 distclean : force
-       cd @subdir@; if test -f Makefile; then $(MAKE) distclean; fi
+       cd @subdir@ && if test -f Makefile; then $(MAKE) distclean; fi
        $(RM) config.status config.log config.cache Makefile
 
 maintainer-clean : force
-       cd @subdir@; if test -f Makefile; then $(MAKE) maintainer-clean; fi
+       cd @subdir@ && if test -f Makefile; then $(MAKE) maintainer-clean; fi
        $(RM) config.status config.log config.cache Makefile
 
 force :
diff -uprN ffcall-1.10.orig/ffcall/avcall/avcall-mips.S ffcall-1.10/ffcall/avcall/avcall-mips.S
--- ffcall-1.10.orig/ffcall/avcall/avcall-mips.S        1999-11-24 22:39:34.000000000 +0000
+++ ffcall-1.10/ffcall/avcall/avcall-mips.S     2005-03-20 00:57:11.000000000 +0000
@@ -1,418 +1,406 @@
-#include "asmmips.h"
        .file   1 "avcall-mips.c"
-       .set    nobopt
+       .section .mdebug.abi32
+       .previous
+       .abicalls
        .text
        .align  2
        .globl  __builtin_avcall
-       DECLARE_FUNCTION(__builtin_avcall)
-
-       .text
-       .text
        .ent    __builtin_avcall
+       .type   __builtin_avcall, @function
 __builtin_avcall:
-       .frame  $fp,32,$31              
-       .mask   0xc0010000,-8
+       .frame  $fp,96,$31              # vars= 56, regs= 3/0, args= 16, extra= 8
+       .mask   0xd0000000,-8
        .fmask  0x00000000,0
-       subu    $sp,$sp,32
-       sw      $fp,20($sp)
+       .set    noreorder
+       .cpload $25
+       .set    reorder
+       subu    $sp,$sp,96
+       .cprestore 16
+       sw      $31,88($sp)
+       sw      $fp,84($sp)
+       sw      $28,80($sp)
        move    $fp,$sp
-       sw      $31,24($sp)
-       sw      $16,16($sp)
+       sw      $4,96($fp)
        addu    $sp,$sp,-1024
-       move    $16,$4
-       move    $4,$sp
-       lw      $3,20($16)
-       lw      $2,4($16)
-       addu    $3,$3,-48
-       subu    $3,$3,$16
+       addu    $2,$sp,16
+       sw      $2,24($fp)
+       sw      $sp,28($fp)
+       lw      $2,96($fp)
+       lw      $3,20($2)
+       lw      $2,96($fp)
+       subu    $2,$3,$2
+       addu    $2,$2,-56
+       sra     $2,$2,2
+       sw      $2,32($fp)
+       lw      $2,96($fp)
+       lw      $2,4($2)
        andi    $2,$2,0x400
-       .set    noreorder
-       .set    nomacro
+       beq     $2,$0,$L2
+       lw      $2,96($fp)
+#APP
+       l.s $f12,28($2)
+#NO_APP
+$L2:
+       lw      $2,96($fp)
+       lw      $2,4($2)
+       andi    $2,$2,0x1000
        beq     $2,$0,$L3
-       sra     $3,$3,2
-       .set    macro
-       .set    reorder
-
- 
-       l.d $f12,32($16)
- 
-       lw      $2,4($16)
-       
-       andi    $2,$2,0x800
-       .set    noreorder
-       .set    nomacro
-       beq     $2,$0,$L65
-       li      $6,4                    
-       .set    macro
-       .set    reorder
-
- 
-       l.d $f14,40($16)
- 
+       lw      $2,96($fp)
+#APP
+       l.d $f12,40($2)
+#NO_APP
 $L3:
-       li      $6,4                    
-$L65:
-       slt     $2,$6,$3
-       .set    noreorder
-       .set    nomacro
-       beq     $2,$0,$L6
-       addu    $5,$4,16
-       .set    macro
-       .set    reorder
-
-       addu    $4,$16,64
-       addu    $6,$3,-4
-$L8:
-       lw      $2,0($4)
-       addu    $4,$4,4
-       addu    $6,$6,-1
-       sw      $2,0($5)
-       .set    noreorder
-       .set    nomacro
-       bne     $6,$0,$L8
-       addu    $5,$5,4
-       .set    macro
-       .set    reorder
-
+       lw      $2,96($fp)
+       lw      $2,4($2)
+       andi    $2,$2,0x800
+       beq     $2,$0,$L4
+       lw      $2,96($fp)
+       lw      $2,4($2)
+       andi    $2,$2,0x1400
+       beq     $2,$0,$L4
+       lw      $2,96($fp)
+#APP
+       l.s $f14,32($2)
+#NO_APP
+$L4:
+       lw      $2,96($fp)
+       lw      $2,4($2)
+       andi    $2,$2,0x2000
+       beq     $2,$0,$L5
+       lw      $2,96($fp)
+       lw      $2,4($2)
+       andi    $2,$2,0x1400
+       beq     $2,$0,$L5
+       lw      $2,96($fp)
+#APP
+       l.d $f14,48($2)
+#NO_APP
+$L5:
+       li      $2,4                    # 0x4
+       sw      $2,36($fp)
 $L6:
-       lw      $25,0($16)
-       lw      $4,48($16)
-       lw      $5,52($16)
-       lw      $6,56($16)
-       lw      $7,60($16)
+       lw      $2,36($fp)
+       lw      $3,32($fp)
+       slt     $2,$2,$3
+       bne     $2,$0,$L9
+       b       $L7
+$L9:
+       lw      $2,36($fp)
+       sll     $3,$2,2
+       lw      $2,28($fp)
+       addu    $4,$3,$2
+       lw      $3,96($fp)
+       lw      $2,36($fp)
+       sll     $2,$2,2
+       addu    $2,$2,$3
+       addu    $2,$2,56
+       lw      $2,0($2)
+       sw      $2,0($4)
+       lw      $2,36($fp)
+       addu    $2,$2,1
+       sw      $2,36($fp)
+       b       $L6
+$L7:
+       lw      $2,96($fp)
+       lw      $25,0($2)
+       lw      $2,96($fp)
+       lw      $3,96($fp)
+       lw      $6,96($fp)
+       lw      $7,96($fp)
+       lw      $4,56($2)
+       lw      $5,60($3)
+       lw      $6,64($6)
+       lw      $7,68($7)
        jal     $31,$25
-       move    $6,$2
-       lw      $4,12($16)
-       li      $2,1                    
-       .set    noreorder
-       .set    nomacro
-       beq     $4,$2,$L64
-       move    $2,$0
-       .set    macro
-       .set    reorder
-
-       .set    noreorder
-       .set    nomacro
-       beq     $4,$0,$L61
-       li      $2,2                    
-       .set    macro
-       .set    reorder
-
-       .set    noreorder
-       .set    nomacro
-       beq     $4,$2,$L62
-       li      $2,3                    
-       .set    macro
-       .set    reorder
-
-       .set    noreorder
-       .set    nomacro
-       beq     $4,$2,$L62
-       li      $2,4                    
-       .set    macro
-       .set    reorder
-
-       .set    noreorder
-       .set    nomacro
-       beq     $4,$2,$L62
-       li      $2,5                    
-       .set    macro
-       .set    reorder
-
-       .set    noreorder
-       .set    nomacro
-       beq     $4,$2,$L63
-       li      $2,6                    
-       .set    macro
-       .set    reorder
-
-       .set    noreorder
-       .set    nomacro
-       beq     $4,$2,$L63
-       li      $2,7                    
-       .set    macro
-       .set    reorder
-
-       .set    noreorder
-       .set    nomacro
-       beq     $4,$2,$L61
-       li      $2,8                    
-       .set    macro
-       .set    reorder
-
-       .set    noreorder
-       .set    nomacro
-       beq     $4,$2,$L61
-       li      $2,9                    
-       .set    macro
-       .set    reorder
-
-       .set    noreorder
-       .set    nomacro
-       beq     $4,$2,$L61
-       li      $2,10                   
-       .set    macro
-       .set    reorder
-
-       .set    noreorder
-       .set    nomacro
-       beq     $4,$2,$L61
-       addu    $2,$4,-11
-       .set    macro
-       .set    reorder
-
-       sltu    $2,$2,2
-       .set    noreorder
-       .set    nomacro
-       beq     $2,$0,$L32
-       li      $2,13                   
-       .set    macro
-       .set    reorder
-
-       lw      $2,8($16)
-       
-       sw      $6,0($2)
-       lw      $4,8($16)
-       .set    noreorder
-       .set    nomacro
-       j       $L11
-       sw      $3,4($4)
-       .set    macro
-       .set    reorder
-
-$L32:
-       .set    noreorder
-       .set    nomacro
-       bne     $4,$2,$L34
-       li      $2,14                   
-       .set    macro
-       .set    reorder
-
-       lw      $2,8($16)
-       .set    noreorder
-       .set    nomacro
-       j       $L11
+       sw      $2,40($fp)
+#ifdef _MIPSEB
+       sw      $3,52($fp)
+#else
+       sw      $3,48($fp)
+#endif
+       sra     $2,$3,31
+#ifdef _MIPSEB
+       sw      $2,48($fp)
+#else
+       sw      $2,52($fp)
+#endif
+       s.s     $f0,56($fp)
+       s.d     $f0,64($fp)
+       lw      $2,96($fp)
+       sw      $2,76($fp)
+       lw      $3,76($fp)
+       lw      $2,12($3)
+       sltu    $2,$2,17
+       beq     $2,$0,$L10
+       lw      $3,76($fp)
+       lw      $2,12($3)
+       sll     $3,$2,2
+       la      $2,$L48
+       addu    $2,$3,$2
+       lw      $2,0($2)
+       .cpadd  $2
+       j       $2
+       .rdata
+       .align  2
+$L48:
+       .gpword $L13
+       .gpword $L10
+       .gpword $L14
+       .gpword $L15
+       .gpword $L16
+       .gpword $L17
+       .gpword $L18
+       .gpword $L19
+       .gpword $L20
+       .gpword $L21
+       .gpword $L22
+       .gpword $L24
+       .gpword $L24
+       .gpword $L25
+       .gpword $L26
+       .gpword $L27
+       .gpword $L28
+       .text
+$L13:
+       lw      $2,96($fp)
+       lw      $3,8($2)
+       lw      $2,40($fp)
+       sw      $2,0($3)
+       b       $L10
+$L14:
+       lw      $2,96($fp)
+       lw      $3,8($2)
+#ifdef _MIPSEB
+       lbu     $2,43($fp)
+#else
+       lbu     $2,40($fp)
+#endif
+       sb      $2,0($3)
+       b       $L10
+$L15:
+       lw      $2,96($fp)
+       lw      $3,8($2)
+#ifdef _MIPSEB
+       lbu     $2,43($fp)
+#else
+       lbu     $2,40($fp)
+#endif
+       sb      $2,0($3)
+       b       $L10
+$L16:
+       lw      $2,96($fp)
+       lw      $3,8($2)
+#ifdef _MIPSEB
+       lbu     $2,43($fp)
+#else
+       lbu     $2,40($fp)
+#endif
+       sb      $2,0($3)
+       b       $L10
+$L17:
+       lw      $2,96($fp)
+       lw      $3,8($2)
+#ifdef _MIPSEB
+       lhu     $2,42($fp)
+#else
+       lhu     $2,40($fp)
+#endif
+       sh      $2,0($3)
+       b       $L10
+$L18:
+       lw      $2,96($fp)
+       lw      $3,8($2)
+#ifdef _MIPSEB
+       lhu     $2,42($fp)
+#else
+       lhu     $2,40($fp)
+#endif
+       sh      $2,0($3)
+       b       $L10
+$L19:
+       lw      $2,96($fp)
+       lw      $3,8($2)
+       lw      $2,40($fp)
+       sw      $2,0($3)
+       b       $L10
+$L20:
+       lw      $2,96($fp)
+       lw      $3,8($2)
+       lw      $2,40($fp)
+       sw      $2,0($3)
+       b       $L10
+$L21:
+       lw      $2,96($fp)
+       lw      $3,8($2)
+       lw      $2,40($fp)
+       sw      $2,0($3)
+       b       $L10
+$L22:
+       lw      $2,96($fp)
+       lw      $3,8($2)
+       lw      $2,40($fp)
+       sw      $2,0($3)
+       b       $L10
+$L24:
+       lw      $2,96($fp)
+       lw      $3,8($2)
+       lw      $2,40($fp)
+       sw      $2,0($3)
+       lw      $2,96($fp)
+       lw      $2,8($2)
+       addu    $3,$2,4
+#ifdef _MIPSEB
+       lw      $2,52($fp)
+#else
+       lw      $2,48($fp)
+#endif
+       sw      $2,0($3)
+       b       $L10
+$L25:
+       lw      $2,96($fp)
+       lw      $2,8($2)
+       l.s     $f0,56($fp)
        s.s     $f0,0($2)
-       .set    macro
-       .set    reorder
-
-$L34:
-       .set    noreorder
-       .set    nomacro
-       bne     $4,$2,$L36
-       li      $2,15                   
-       .set    macro
-       .set    reorder
-
-       lw      $2,8($16)
-       
+       b       $L10
+$L26:
+       lw      $2,96($fp)
+       lw      $2,8($2)
+       l.d     $f0,64($fp)
        s.d     $f0,0($2)
-       .set    noreorder
-       .set    nomacro
-       j       $L64
-       move    $2,$0
-       .set    macro
-       .set    reorder
-
-$L36:
-       .set    noreorder
-       .set    nomacro
-       beq     $4,$2,$L61
-       li      $2,16                   
-       .set    macro
-       .set    reorder
-
-       .set    noreorder
-       .set    nomacro
-       bne     $4,$2,$L64
-       move    $2,$0
-       .set    macro
-       .set    reorder
-
-       lw      $3,4($16)
-       
-       andi    $2,$3,0x1
-       .set    noreorder
-       .set    nomacro
-       beq     $2,$0,$L41
-       li      $2,1                    
-       .set    macro
-       .set    reorder
-
-       lw      $3,16($16)
-       
-       .set    noreorder
-       .set    nomacro
-       bne     $3,$2,$L42
-       li      $2,2                    
-       .set    macro
-       .set    reorder
-
-       lw      $3,8($16)
-       lbu     $2,0($6)
-       .set    noreorder
-       .set    nomacro
-       j       $L11
+       b       $L10
+$L27:
+       lw      $2,96($fp)
+       lw      $3,8($2)
+       lw      $2,40($fp)
+       sw      $2,0($3)
+       b       $L10
+$L28:
+       lw      $2,96($fp)
+       lw      $2,4($2)
+       andi    $2,$2,0x1
+       beq     $2,$0,$L29
+       lw      $2,96($fp)
+       lw      $3,16($2)
+       li      $2,1                    # 0x1
+       bne     $3,$2,$L30
+       lw      $2,96($fp)
+       lw      $3,8($2)
+       lw      $2,40($fp)
+       lbu     $2,0($2)
        sb      $2,0($3)
-       .set    macro
-       .set    reorder
-
-$L42:
-       .set    noreorder
-       .set    nomacro
-       bne     $3,$2,$L44
-       li      $2,4                    
-       .set    macro
-       .set    reorder
-
-       lw      $3,8($16)
-       lhu     $2,0($6)
-       .set    noreorder
-       .set    nomacro
-       j       $L11
+       b       $L10
+$L30:
+       lw      $2,96($fp)
+       lw      $3,16($2)
+       li      $2,2                    # 0x2
+       bne     $3,$2,$L32
+       lw      $2,96($fp)
+       lw      $3,8($2)
+       lw      $2,40($fp)
+       lhu     $2,0($2)
        sh      $2,0($3)
-       .set    macro
-       .set    reorder
-
-$L44:
-       .set    noreorder
-       .set    nomacro
-       bne     $3,$2,$L46
-       li      $2,8                    
-       .set    macro
-       .set    reorder
-
-       lw      $3,8($16)
-       lw      $2,0($6)
-       .set    noreorder
-       .set    nomacro
-       j       $L11
+       b       $L10
+$L32:
+       lw      $2,96($fp)
+       lw      $3,16($2)
+       li      $2,4                    # 0x4
+       bne     $3,$2,$L34
+       lw      $2,96($fp)
+       lw      $3,8($2)
+       lw      $2,40($fp)
+       lw      $2,0($2)
        sw      $2,0($3)
-       .set    macro
-       .set    reorder
-
-$L46:
-       .set    noreorder
-       .set    nomacro
-       bne     $3,$2,$L48
-       addu    $2,$3,3
-       .set    macro
-       .set    reorder
-
-       lw      $3,8($16)
-       lw      $2,0($6)
-       
+       b       $L10
+$L34:
+       lw      $2,96($fp)
+       lw      $3,16($2)
+       li      $2,8                    # 0x8
+       bne     $3,$2,$L36
+       lw      $2,96($fp)
+       lw      $3,8($2)
+       lw      $2,40($fp)
+       lw      $2,0($2)
        sw      $2,0($3)
-       lw      $4,8($16)
-       lw      $2,4($6)
-       .set    noreorder
-       .set    nomacro
-       j       $L11
-       sw      $2,4($4)
-       .set    macro
-       .set    reorder
-
-$L48:
-       srl     $5,$2,2
-       addu    $5,$5,-1
-       .set    noreorder
-       .set    nomacro
-       bltz    $5,$L11
-       sll     $2,$5,2
-       .set    macro
-       .set    reorder
-
-       addu    $6,$2,$6
-$L52:
-       lw      $2,0($6)
-       addu    $6,$6,-4
-       sll     $3,$5,2
-       lw      $4,8($16)
-       addu    $5,$5,-1
-       addu    $3,$3,$4
-       .set    noreorder
-       .set    nomacro
-       bgez    $5,$L52
+       lw      $2,96($fp)
+       lw      $2,8($2)
+       addu    $3,$2,4
+       lw      $2,40($fp)
+       addu    $2,$2,4
+       lw      $2,0($2)
        sw      $2,0($3)
-       .set    macro
-       .set    reorder
-
-       .set    noreorder
-       .set    nomacro
-       j       $L64
-       move    $2,$0
-       .set    macro
-       .set    reorder
-
-$L41:
-       andi    $2,$3,0x2
-       .set    noreorder
-       .set    nomacro
-       beq     $2,$0,$L11
-       li      $2,1                    
-       .set    macro
-       .set    reorder
-
-       lw      $3,16($16)
-       
-       .set    noreorder
-       .set    nomacro
-       bne     $3,$2,$L56
-       li      $2,2                    
-       .set    macro
-       .set    reorder
-
-$L62:
-       lw      $2,8($16)
-       .set    noreorder
-       .set    nomacro
-       j       $L11
-       sb      $6,0($2)
-       .set    macro
-       .set    reorder
-
-$L56:
-       .set    noreorder
-       .set    nomacro
-       bne     $3,$2,$L58
-       li      $2,4                    
-       .set    macro
-       .set    reorder
-
-$L63:
-       lw      $2,8($16)
-       .set    noreorder
-       .set    nomacro
-       j       $L11
-       sh      $6,0($2)
-       .set    macro
-       .set    reorder
-
-$L58:
-       .set    noreorder
-       .set    nomacro
-       bne     $3,$2,$L64
-       move    $2,$0
-       .set    macro
-       .set    reorder
-
-$L61:
-       lw      $2,8($16)
-       
-       sw      $6,0($2)
-$L11:
+       b       $L10
+$L36:
+       lw      $2,96($fp)
+       lw      $2,16($2)
+       addu    $2,$2,3
+       srl     $2,$2,2
+       sw      $2,72($fp)
+$L38:
+       lw      $2,72($fp)
+       addu    $2,$2,-1
+       sw      $2,72($fp)
+       bgez    $2,$L40
+       b       $L10
+$L40:
+       lw      $4,96($fp)
+       lw      $2,72($fp)
+       sll     $3,$2,2
+       lw      $2,8($4)
+       addu    $4,$3,$2
+       lw      $2,72($fp)
+       sll     $3,$2,2
+       lw      $2,40($fp)
+       addu    $2,$3,$2
+       lw      $2,0($2)
+       sw      $2,0($4)
+       b       $L38
+$L29:
+       lw      $2,96($fp)
+       lw      $2,4($2)
+       andi    $2,$2,0x2
+       beq     $2,$0,$L10
+       lw      $2,96($fp)
+       lw      $3,16($2)
+       li      $2,1                    # 0x1
+       bne     $3,$2,$L43
+       lw      $2,96($fp)
+       lw      $3,8($2)
+#ifdef _MIPSEB
+       lbu     $2,43($fp)
+#else
+       lbu     $2,40($fp)
+#endif
+       sb      $2,0($3)
+       b       $L10
+$L43:
+       lw      $2,96($fp)
+       lw      $3,16($2)
+       li      $2,2                    # 0x2
+       bne     $3,$2,$L45
+       lw      $2,96($fp)
+       lw      $3,8($2)
+#ifdef _MIPSEB
+       lhu     $2,42($fp)
+#else
+       lhu     $2,40($fp)
+#endif
+       sh      $2,0($3)
+       b       $L10
+$L45:
+       lw      $2,96($fp)
+       lw      $3,16($2)
+       li      $2,4                    # 0x4
+       bne     $3,$2,$L10
+       lw      $2,96($fp)
+       lw      $3,8($2)
+       lw      $2,40($fp)
+       sw      $2,0($3)
+$L10:
        move    $2,$0
-$L64:
        move    $sp,$fp
-       lw      $31,24($sp)
-       lw      $fp,20($sp)
-       lw      $16,16($sp)
-       
-       .set    noreorder
-       .set    nomacro
+       lw      $31,88($sp)
+       lw      $fp,84($sp)
+       addu    $sp,$sp,96
        j       $31
-       addu    $sp,$sp,32
-       .set    macro
-       .set    reorder
-
        .end    __builtin_avcall
+       .ident  "GCC: (GNU) 3.3.5 (Debian 1:3.3.5-8)"
diff -uprN ffcall-1.10.orig/ffcall/avcall/avcall-mips.c ffcall-1.10/ffcall/avcall/avcall-mips.c
--- ffcall-1.10.orig/ffcall/avcall/avcall-mips.c        2004-01-26 14:58:31.000000000 +0000
+++ ffcall-1.10/ffcall/avcall/avcall-mips.c     2005-03-20 00:57:11.000000000 +0000
@@ -3,6 +3,7 @@
 /**
   Copyright 1993 Bill Triggs, <[EMAIL PROTECTED]>
   Copyright 1995-1999 Bruno Haible, <[EMAIL PROTECTED]>
+  Copyright 2005 Thiemo Seufer  <[EMAIL PROTECTED]>
 
   This is free software distributed under the GNU General Public
   Licence described in the file COPYING. Contact the author if
@@ -49,108 +50,122 @@ register func_pointer     t9      __asm__("$25");
 int
 __builtin_avcall(av_alist* l)
 {
-  register __avword*   sp      __asm__("$sp");  /* C names for registers */
-  register __avword    iret2   __asm__("$3");
-  register float       fret    __asm__("$f0");
-  register double      dret    __asm__("$f0");
+  register __avword*   sp __asm__("$sp");  /* C names for registers */
+  register __avword    iret2_tmp __asm__("$3");
+  register float       fret_tmp __asm__("$f0");
+  register double      dret_tmp __asm__("$f0");
  __avword *space = __builtin_alloca(__AV_ALIST_WORDS * sizeof(__avword));     /* big space for child's stack frame */
  __avword *argframe = (__avword*)sp;  /* stack offset for argument list is 0 */
   int arglen = l->aptr - l->args;
-  __avword i;
-
-  if (l->flags & __AV_FLOAT_1)         /* push leading float args */
-  {
-    __asm__("l.d $f12,%1(%0)" : : "p" (l), "i" OFFSETOF(av_alist,floatarg[0]));
-    if (l->flags & __AV_FLOAT_2)
-      __asm__("l.d $f14,%1(%0)" : : "p" (l), "i" OFFSETOF(av_alist,floatarg[1]));
-  }
+  int i;
+  __avword iret;
+  long long iret2;
+  float fret;
+  double dret;
+
+  /* load leading float args */        
+  if (l->flags & __AV_FLOAT_1)
+    __asm__("l.s $f12,%1(%0)" : : "p" (l), "i" OFFSETOF(av_alist,floatarg[0]));
+  if (l->flags & __AV_DOUBLE_1)
+    __asm__("l.d $f12,%1(%0)" : : "p" (l), "i" OFFSETOF(av_alist,doublearg[0]));
+  if ((l->flags & __AV_FLOAT_2) && (l->flags & (__AV_FLOAT_1 | __AV_DOUBLE_1)))
+    __asm__("l.s $f14,%1(%0)" : : "p" (l), "i" OFFSETOF(av_alist,floatarg[1]));
+  if ((l->flags & __AV_DOUBLE_2) && (l->flags & (__AV_FLOAT_1 | __AV_DOUBLE_1)))
+    __asm__("l.d $f14,%1(%0)" : : "p" (l), "i" OFFSETOF(av_alist,doublearg[1]));
 
   for (i = 4; i < arglen; i++)         /* push excess function args */
     argframe[i] = l->args[i];
 
-  i = (*(t9 = l->func))(l->args[0], l->args[1],  /* call function with 1st 4 args */
-                       l->args[2], l->args[3]);
+  iret = (*(t9 = l->func))(l->args[0], l->args[1],  /* call function with 1st 4 args */
+                          l->args[2], l->args[3]);
+  iret2 = iret2_tmp;
+  fret = fret_tmp;
+  dret = dret_tmp;
 
   /* save return value */
-  if (l->rtype == __AVvoid) {
-  } else
-  if (l->rtype == __AVword) {
-    RETURN(__avword, i);
-  } else
-  if (l->rtype == __AVchar) {
-    RETURN(char, i);
-  } else
-  if (l->rtype == __AVschar) {
-    RETURN(signed char, i);
-  } else
-  if (l->rtype == __AVuchar) {
-    RETURN(unsigned char, i);
-  } else
-  if (l->rtype == __AVshort) {
-    RETURN(short, i);
-  } else
-  if (l->rtype == __AVushort) {
-    RETURN(unsigned short, i);
-  } else
-  if (l->rtype == __AVint) {
-    RETURN(int, i);
-  } else
-  if (l->rtype == __AVuint) {
-    RETURN(unsigned int, i);
-  } else
-  if (l->rtype == __AVlong) {
-    RETURN(long, i);
-  } else
-  if (l->rtype == __AVulong) {
-    RETURN(unsigned long, i);
-  } else
-  if (l->rtype == __AVlonglong || l->rtype == __AVulonglong) {
-    ((__avword*)l->raddr)[0] = i;
+  switch (l->rtype) {
+  default:
+  case __AVvoid:
+    break;
+  case __AVword:
+    RETURN(__avword, iret);
+    break;
+  case __AVchar:
+    RETURN(char, iret);
+    break;
+  case __AVschar:
+    RETURN(signed char, iret);
+    break;
+  case __AVuchar:
+    RETURN(unsigned char, iret);
+    break;
+  case __AVshort:
+    RETURN(short, iret);
+    break;
+  case __AVushort:
+    RETURN(unsigned short, iret);
+    break;
+  case __AVint:
+    RETURN(int, iret);
+    break;
+  case __AVuint:
+    RETURN(unsigned int, iret);
+    break;
+  case __AVlong:
+    RETURN(long, iret);
+    break;
+  case __AVulong:
+    RETURN(unsigned long, iret);
+    break;
+  case __AVlonglong:
+  case __AVulonglong:
+    ((__avword*)l->raddr)[0] = (__avword)(iret);
     ((__avword*)l->raddr)[1] = iret2;
-  } else
-  if (l->rtype == __AVfloat) {
+    break;
+  case __AVfloat:
     RETURN(float, fret);
-  } else
-  if (l->rtype == __AVdouble) {
+    break;
+  case __AVdouble:
     RETURN(double, dret);
-  } else
-  if (l->rtype == __AVvoidp) {
-    RETURN(void*, i);
-  } else
-  if (l->rtype == __AVstruct) {
+    break;
+  case __AVvoidp:
+    RETURN(void*, (__avword)iret);
+    break;
+  case __AVstruct:
     if (l->flags & __AV_PCC_STRUCT_RETURN) {
      /* pcc struct return convention: need a  *(TYPE*)l->raddr = *(TYPE*)i;  */
       if (l->rsize == sizeof(char)) {
-        RETURN(char, *(char*)i);
+        RETURN(char, *(char*)(__avword)iret);
       } else
       if (l->rsize == sizeof(short)) {
-        RETURN(short, *(short*)i);
+        RETURN(short, *(short*)(__avword)iret);
       } else
       if (l->rsize == sizeof(int)) {
-        RETURN(int, *(int*)i);
+        RETURN(int, *(int*)(__avword)iret);
       } else
       if (l->rsize == sizeof(double)) {
-        ((int*)l->raddr)[0] = ((int*)i)[0];
-        ((int*)l->raddr)[1] = ((int*)i)[1];
+        ((int*)l->raddr)[0] = ((int*)(__avword)iret)[0];
+        ((int*)l->raddr)[1] = ((int*)(__avword)iret)[1];
       } else {
         int n = (l->rsize + sizeof(__avword)-1)/sizeof(__avword);
         while (--n >= 0)
-          ((__avword*)l->raddr)[n] = ((__avword*)i)[n];
+          ((__avword*)l->raddr)[n] = ((__avword*)(__avword)iret)[n];
       }
     } else {
       /* normal struct return convention */
       if (l->flags & __AV_SMALL_STRUCT_RETURN) {
         if (l->rsize == sizeof(char)) {
-          RETURN(char, i);
+          RETURN(char, iret);
         } else
         if (l->rsize == sizeof(short)) {
-          RETURN(short, i);
+          RETURN(short, iret);
         } else
         if (l->rsize == sizeof(int)) {
-          RETURN(int, i);
+          RETURN(int, iret);
         }
       }
     }
+    break;
   }
   return 0;
 }
diff -uprN ffcall-1.10.orig/ffcall/avcall/avcall.h.in ffcall-1.10/ffcall/avcall/avcall.h.in
--- ffcall-1.10.orig/ffcall/avcall/avcall.h.in  2004-01-26 14:58:41.000000000 +0000
+++ ffcall-1.10/ffcall/avcall/avcall.h.in       2005-03-20 00:57:11.000000000 +0000
@@ -323,9 +323,11 @@ enum __AV_alist_flags
 #if defined(__i386__) || defined(__m68k__) || defined(__mipsn32__) || defined(__mips64__) || defined(__sparc64__) || defined(__alpha__) || defined(__arm__) || defined(__rs6000__) || defined(__convex__) || defined(__ia64__) || defined(__x86_64__) || defined(__s390__)
   __AV_REGISTER_STRUCT_RETURN  = 1<<9,
 #endif
-#if defined(__mips__) && !defined(__mipsn32__)
+#if defined(__mips__) && !defined(__mipsn32__) && !defined(__mips64__)
   __AV_FLOAT_1                 = 1<<10,
   __AV_FLOAT_2                 = 1<<11,
+  __AV_DOUBLE_1                        = 1<<12,
+  __AV_DOUBLE_2                        = 1<<13,
 #endif
 
   __AV_flag_for_broken_compilers_that_dont_like_trailing_commas
@@ -367,10 +369,11 @@ typedef struct
   __avword*            iaptr;
   __avword             iargs[6];
 #endif
-#if defined(__mips__) && !defined(__mipsn32__)
+#if defined(__mips__) && !defined(__mipsn32__) && !defined(__mips64__)
   /* store the floating-point arguments in an extra array */
   int                  anum;
-  double               floatarg[2];
+  float                        floatarg[2];
+  double               doublearg[2];
 #endif
 #if defined(__mipsn32__) || defined(__mips64__)
   /* store the floating-point arguments in an extra array */
@@ -447,7 +450,7 @@ typedef struct
 #define __av_start1(LIST)                                              \
    (LIST).aptr = &(LIST).args[0],
 #endif
-#if defined(__mips__) && !defined(__mipsn32__)
+#if defined(__mips__) && !defined(__mipsn32__) && !defined(__mips64__)
 #define __av_start1(LIST)                                              \
    (LIST).anum = 0,                                                    \
    (LIST).aptr = &(LIST).args[0],
@@ -606,7 +609,7 @@ typedef struct
 #define __av_start_struct3(LIST)  \
   0
 #endif
-#if defined(__mips__) && !defined(__mipsn32__)
+#if defined(__mips__) && !defined(__mipsn32__) && !defined(__mips64__)
 #define __av_reg_struct_return(LIST,TYPE_SIZE,TYPE_SPLITTABLE)  \
   ((TYPE_SIZE) == 1 || (TYPE_SIZE) == 2 || (TYPE_SIZE) == 4)
 /* Test __AV_SMALL_STRUCT_RETURN instead of __AV_REGISTER_STRUCT_RETURN. */
@@ -768,7 +771,7 @@ typedef struct
 /* `long long' fits in __avword. */
 #define av_longlong            __av_word
 #define av_ulonglong(LIST,VAL) __av_word(LIST,(unsigned long long)(VAL))
-#elif defined(__i386__) || defined(__m68k__) || defined(__mips__) || (defined(__sparc__) && !defined(__sparc64__)) || defined(__hppa__) || defined(__arm__) || defined(__rs6000__) || defined(__m88k__) || defined(__convex__) || defined(__s390__)
+#elif defined(__i386__) || defined(__m68k__) || (defined(__mips__) && !defined(__mipsn32__) && !defined(__mips64__)) || (defined(__sparc__) && !defined(__sparc64__)) || defined(__hppa__) || defined(__arm__) || defined(__rs6000__) || defined(__m88k__) || defined(__convex__) || defined(__s390__)
 /* `long long's are passed embedded on the arg stack. */
 #define av_longlong(LIST,VAL)  __av_longlong(LIST,long long,VAL)
 #define av_ulonglong(LIST,VAL) __av_longlong(LIST,unsigned long long,VAL)
@@ -840,7 +843,7 @@ typedef struct
 
 #endif
 
-#if defined(__mips__) && !defined(__mipsn32__)
+#if defined(__mips__) && !defined(__mipsn32__) && !defined(__mips64__)
 
 /* Up to 2 leading float or double non-varargs args can be passed in
  * float registers, but we also push them into the corresponding int
@@ -851,22 +854,22 @@ typedef struct
   (++(LIST).aptr > __av_eptr(LIST)                                     \
    ? -1 : ((++(LIST).anum == 1                                         \
            ? ((LIST).flags |= __AV_FLOAT_1,                            \
-              ((float*)(LIST).floatarg)[1] = ((float*)(LIST).aptr)[-1] = (float)(VAL))\
-           : (LIST).anum == 2 && ((LIST).flags & __AV_FLOAT_1)         \
+              (LIST).floatarg[0] = ((float*)(LIST).aptr)[-1] = (float)(VAL))\
+           : (LIST).anum == 2 && (((LIST).flags & __AV_FLOAT_1) || ((LIST).flags & __AV_DOUBLE_1))\
            ? ((LIST).flags |= __AV_FLOAT_2,                            \
-              ((float*)(LIST).floatarg)[3] = ((float*)(LIST).aptr)[-1] = (float)(VAL))\
-           : (*(float*)&(LIST).aptr[-1] = (float)(VAL))),              \
+              (LIST).floatarg[1] = ((float*)(LIST).aptr)[-1] = (float)(VAL))\
+           : (((float*)(LIST).aptr)[-1] = (float)(VAL))),              \
           0))
 
 #define av_double(LIST,VAL)                                            \
   (((LIST).aptr = (__avword*)(((__avword)(LIST).aptr+15)&-8))          \
    > __av_eptr(LIST)                                                   \
    ? -1 : ((++(LIST).anum == 1                                         \
-           ? ((LIST).flags |= __AV_FLOAT_1,                            \
-              (LIST).floatarg[0] = ((double*)(LIST).aptr)[-1] = (double)(VAL))\
-           : (LIST).anum == 2 && ((LIST).flags & __AV_FLOAT_1)         \
-           ? ((LIST).flags |= __AV_FLOAT_2,                            \
-              (LIST).floatarg[1] = ((double*)(LIST).aptr)[-1] = (double)(VAL))\
+           ? ((LIST).flags |= __AV_DOUBLE_1,                           \
+              (LIST).doublearg[0] = ((double*)(LIST).aptr)[-1] = (double)(VAL))\
+           : (LIST).anum == 2 && (((LIST).flags & __AV_FLOAT_1) || ((LIST).flags & __AV_DOUBLE_1))\
+           ? ((LIST).flags |= __AV_DOUBLE_2,                           \
+              (LIST).doublearg[1] = ((double*)(LIST).aptr)[-1] = (double)(VAL))\
            : (((double*)(LIST).aptr)[-1] = (double)(VAL))),            \
           0))
 
@@ -1145,7 +1148,7 @@ typedef struct
            0)))
 #endif
 /* small structures < 1 word are adjusted depending on compiler */
-#if defined(__mips__) && !defined(__mipsn32__)
+#if defined(__mips__) && !defined(__mipsn32__) && !defined(__mips64__)
 #define __av_struct_leftadjusted(LIST,TYPE,TYPE_SIZE,TYPE_ALIGN,ASSIGN,VAL) \
   (((LIST).aptr =                                                      \
    (__avword*)(((__avword)(LIST).aptr+(TYPE_SIZE)+(TYPE_ALIGN)-1) & -(long)(TYPE_ALIGN)))\
diff -uprN ffcall-1.10.orig/ffcall/callback/trampoline_r/tramp-mips.s ffcall-1.10/ffcall/callback/trampoline_r/tramp-mips.s
--- ffcall-1.10.orig/ffcall/callback/trampoline_r/tramp-mips.s  2004-01-26 14:59:44.000000000 +0000
+++ ffcall-1.10/ffcall/callback/trampoline_r/tramp-mips.s       2005-03-20 00:57:11.000000000 +0000
@@ -17,12 +17,12 @@
        .globl  tramp
        .ent    tramp
 tramp:
-       li      $2,0x73550000
-       ori     $2,$2,0x4711
-       li      $25,0xbabe0000
-       ori     $25,$25,0xbec0
+       /* We can assume that our own address (=tramp) is in $25. */
+       lw      $2,$LC0-tramp($25)
+       lw      $25,$LC1-tramp($25)
        /* The called function expects to see its own address in $25. */
        j       $25
-       /* Some Mips hardware running Irix-4.0.5 needs this nop. */
-       nop
+        nop
+$LC0:  .word   0x73554711
+$LC1:  .word   0xbabebec0
        .end    tramp
diff -uprN ffcall-1.10.orig/ffcall/callback/trampoline_r/trampoline.c ffcall-1.10/ffcall/callback/trampoline_r/trampoline.c
--- ffcall-1.10.orig/ffcall/callback/trampoline_r/trampoline.c  2005-03-23 19:25:26.000000000 +0000
+++ ffcall-1.10/ffcall/callback/trampoline_r/trampoline.c       2005-03-20 00:57:11.000000000 +0000
@@ -242,11 +242,7 @@ extern void __TR_clear_cache();
 #define TRAMP_LENGTH 14
 #define TRAMP_ALIGN 16
 #endif
-#if defined(__mips__) && !defined(__mipsn32__)
-#define TRAMP_LENGTH 24
-#define TRAMP_ALIGN 4
-#endif
-#ifdef __mipsn32__
+#if defined(__mips__) || defined(__mipsn32__) && !defined(__mips64__)
 #define TRAMP_LENGTH 24
 #define TRAMP_ALIGN 4
 #endif
@@ -459,41 +455,7 @@ __TR_function alloc_trampoline_r (addres
 #define tramp_data(function)  \
   *(long *)  (function + 2)
 #endif
-#if defined(__mips__) && !defined(__mipsn32__)
-  /* function:
-   *    li $2,<data>&0xffff0000                3C 02 hi16(<data>)
-   *    ori $2,$2,<data>&0xffff                34 42 lo16(<data>)
-   *    li $25,<address>&0xffff0000    3C 19 hi16(<address>)
-   *    ori $25,$25,<address>&0xffff   37 39 lo16(<address>)
-   *    j $25                          03 20 00 08
-   *    nop                            00 00 00 00
-   */
-  /* What about big endian / little endian ?? */
-  *(short *) (function + 0) = 0x3C02;
-  *(short *) (function + 2) = (unsigned long) data >> 16;
-  *(short *) (function + 4) = 0x3442;
-  *(short *) (function + 6) = (unsigned long) data & 0xffff;
-  *(short *) (function + 8) = 0x3C19;
-  *(short *) (function +10) = (unsigned long) address >> 16;
-  *(short *) (function +12) = 0x3739;
-  *(short *) (function +14) = (unsigned long) address & 0xffff;
-  *(long *)  (function +16) = 0x03200008;
-  *(long *)  (function +20) = 0x00000000;
-#define is_tramp(function)  \
-  *(unsigned short *) (function + 0) == 0x3C02 && \
-  *(unsigned short *) (function + 4) == 0x3442 && \
-  *(unsigned short *) (function + 8) == 0x3C19 && \
-  *(unsigned short *) (function +12) == 0x3739 && \
-  *(unsigned long *)  (function +16) == 0x03200008 && \
-  *(unsigned long *)  (function +20) == 0x00000000
-#define hilo(hiword,loword)  \
-  (((unsigned long) (hiword) << 16) | (unsigned long) (loword))
-#define tramp_address(function)  \
-  hilo(*(unsigned short *) (function +10), *(unsigned short *) (function +14))
-#define tramp_data(function)  \
-  hilo(*(unsigned short *) (function + 2), *(unsigned short *) (function + 6))
-#endif
-#ifdef __mipsn32__
+#if defined(__mips__) || defined(__mipsn32__) && !defined(__mips64__)
   /* function:
    *    lw $2,16($25)                  8F 22 00 10
    *    lw $25,20($25)                 8F 39 00 14
@@ -502,7 +464,6 @@ __TR_function alloc_trampoline_r (addres
    *    .word <data>                   <data>
    *    .word <address>                        <address>
    */
-  /* What about big endian / little endian ?? */
   *(unsigned int *) (function + 0) = 0x8F220010;
   *(unsigned int *) (function + 4) = 0x8F390014;
   *(unsigned int *) (function + 8) = 0x03200008;
@@ -510,10 +471,10 @@ __TR_function alloc_trampoline_r (addres
   *(unsigned int *) (function +16) = (unsigned int) data;
   *(unsigned int *) (function +20) = (unsigned int) address;
 #define is_tramp(function)  \
-  *(int *)          (function + 0) == 0x8F220010 && \
-  *(int *)          (function + 4) == 0x8F390014 && \
-  *(int *)          (function + 8) == 0x03200008 && \
-  *(int *)          (function +12) == 0x00000000
+  *(unsigned int *) (function + 0) == 0x8F220010 && \
+  *(unsigned int *) (function + 4) == 0x8F390014 && \
+  *(unsigned int *) (function + 8) == 0x03200008 && \
+  *(unsigned int *) (function +12) == 0x00000000
 #define tramp_address(function)  \
   *(unsigned int *) (function +20)
 #define tramp_data(function)  \
@@ -597,14 +558,17 @@ __TR_function alloc_trampoline_r (addres
    *    .dword <data>                  <data>
    *    .dword <address>               <address>
    */
-  /* What about big endian / little endian ?? */
-  *(long *)          (function + 0) = 0xDF220010DF390018L;
-  *(long *)          (function + 8) = 0x0320000800000000L;
+  *(unsigned int *)  (function + 0) = 0xDF220010;
+  *(unsigned int *)  (function + 4) = 0xDF390018;
+  *(unsigned int *)  (function + 8) = 0x03200008;
+  *(unsigned int *)  (function +12) = 0x00000000;
   *(unsigned long *) (function +16) = (unsigned long) data;
   *(unsigned long *) (function +24) = (unsigned long) address;
 #define is_tramp(function)  \
-  *(long *)          (function + 0) == 0xDF220010DF390018L && \
-  *(long *)          (function + 8) == 0x0320000800000000L
+  *(unsigned int *)  (function + 0) == 0xDF220010 && \
+  *(unsigned int *)  (function + 4) == 0xDF390018 && \
+  *(unsigned int *)  (function + 8) == 0x03200008 && \
+  *(unsigned int *)  (function +12) == 0x00000000
 #define tramp_address(function)  \
   *(unsigned long *) (function +24)
 #define tramp_data(function)  \
diff -uprN ffcall-1.10.orig/ffcall/callback/vacall_r/vacall_r.h.in ffcall-1.10/ffcall/callback/vacall_r/vacall_r.h.in
--- ffcall-1.10.orig/ffcall/callback/vacall_r/vacall_r.h.in     2004-06-02 19:18:12.000000000 +0000
+++ ffcall-1.10/ffcall/callback/vacall_r/vacall_r.h.in  2005-03-20 00:57:12.000000000 +0000
@@ -331,7 +331,7 @@ enum __VA_alist_flags
 #if defined(__i386__) || defined(__m68k__) || defined(__mipsn32__) || defined(__mips64__) || defined(__sparc64__) || defined(__alpha__) || defined(__arm__) || defined(__rs6000__) || defined(__convex__) || defined(__ia64__) || defined(__x86_64__) || defined(__s390__)
   __VA_REGISTER_STRUCT_RETURN  = 1<<10,
 #endif
-#if defined(__mips__) && !defined(__mipsn32__)
+#if defined(__mips__) && !defined(__mipsn32__) && !defined(__mips64__)
   __VA_FLOAT_1                 = 1<<11,
   __VA_FLOAT_2                 = 1<<12,
 #endif
@@ -395,7 +395,7 @@ typedef struct
   float          farg[4];
   double         darg[2];
 #endif
-#if defined(__mips__) && !defined(__mipsn32__)
+#if defined(__mips__) && !defined(__mipsn32__) && !defined(__mips64__)
   int            anum;
   float          farg[2];
   double         darg[2];
@@ -545,11 +545,11 @@ typedef __va_alist* va_alist;
 #define __va_start_struct1(LIST,TYPE_SIZE,TYPE_ALIGN,TYPE_SPLITTABLE)  \
   0
 #endif
-#if defined(__mips__) && !defined(__mipsn32__) || (defined(__sparc__) && !defined(__sparc64__)) || defined(__m88k__)
+#if defined(__mips__) && !defined(__mipsn32__) && !defined(__mips64__) || (defined(__sparc__) && !defined(__sparc64__)) || defined(__m88k__)
 #define __va_reg_struct_return(LIST,TYPE_SIZE,TYPE_SPLITTABLE)  \
   ((TYPE_SIZE) == 1 || (TYPE_SIZE) == 2 || (TYPE_SIZE) == 4)
 /* Test __VA_SMALL_STRUCT_RETURN instead of __VA_REGISTER_STRUCT_RETURN. */
-#if defined(__mips__) && !defined(__mipsn32__)
+#if defined(__mips__) && !defined(__mipsn32__) && !defined(__mips64__)
 #define __va_start_struct1(LIST,TYPE_SIZE,TYPE_ALIGN,TYPE_SPLITTABLE)  \
   ((LIST)->anum++,                                                     \
    0                                                                   \
@@ -651,7 +651,7 @@ typedef __va_alist* va_alist;
 /* Padding of non-struct arguments. */
 #define __va_argsize(TYPE_SIZE)  \
   (((TYPE_SIZE) + sizeof(__vaword)-1) & -(long)sizeof(__vaword))
-#if defined(__i386__) || defined(__m68k__) || defined(__mips__) && !defined(__mipsn32__) || (defined(__sparc__) && !defined(__sparc64__)) || defined(__alpha__) || defined(__arm__) || (defined(__rs6000__) && (defined(_AIX) || (defined(__MACH__) && defined(__APPLE__)))) || defined(__m88k__) || defined(__convex__) || defined(__ia64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__m68k__) || defined(__mips__) && !defined(__mipsn32__) && !defined(__mips64__) || (defined(__sparc__) && !defined(__sparc64__)) || defined(__alpha__) || defined(__arm__) || (defined(__rs6000__) && (defined(_AIX) || (defined(__MACH__) && defined(__APPLE__)))) || defined(__m88k__) || defined(__convex__) || defined(__ia64__) || defined(__x86_64__)
 /* args grow up */
 /* small structures < 1 word are adjusted depending on compiler */
 #define __va_arg_leftadjusted(LIST,TYPE_SIZE,TYPE_ALIGN)  \
@@ -737,20 +737,26 @@ typedef __va_alist* va_alist;
    (LIST)->aptr + ((-(TYPE_SIZE)) & 3)                                 \
   )
 #endif
-#if defined(__i386__) || defined(__alpha__) || defined(__ia64__)
+#if defined(__i386__) || defined(__alpha__) || defined(__ia64__) || ((defined(__mipsn32__) || defined(__mips64__)) && defined(_MIPSEL))
 /* little endian -> small args < 1 word are adjusted to the left */
 #define __va_arg_adjusted(LIST,TYPE_SIZE,TYPE_ALIGN)  \
   __va_arg_leftadjusted(LIST,TYPE_SIZE,TYPE_ALIGN)
 #endif
-#if defined(__m68k__) || defined(__mipsn32__) || defined(__mips64__) || defined(__sparc__) || defined(__sparc64__) || defined(__hppa__) || defined(__arm__) || defined(__rs6000__) || defined(__m88k__) || defined(__convex__) || defined(__s390__)
+#if defined(__m68k__) || ((defined(__mipsn32__) || defined(__mips64__)) && defined(_MIPSEB)) || defined(__sparc__) || defined(__sparc64__) || defined(__hppa__) || defined(__arm__) || defined(__rs6000__) || defined(__m88k__) || defined(__convex__) || defined(__s390__)
 /* big endian -> small args < 1 word are adjusted to the right */
 #define __va_arg_adjusted(LIST,TYPE_SIZE,TYPE_ALIGN)  \
   __va_arg_rightadjusted(LIST,TYPE_SIZE,TYPE_ALIGN)
 #endif
-#if defined(__mips__) && !defined(__mipsn32__)
+#if defined(__mips__) && !defined(__mipsn32__) && !defined(__mips64__)
+#ifdef _MIPSEB
 /* big endian -> small args < 1 word are adjusted to the right */
 #define __va_arg_adjusted(LIST,TYPE_SIZE,TYPE_ALIGN)  \
   ((LIST)->anum++, __va_arg_rightadjusted(LIST,TYPE_SIZE,TYPE_ALIGN))
+#else /* _MIPSEL */
+/* little endian -> small args < 1 word are adjusted to the left */
+#define __va_arg_adjusted(LIST,TYPE_SIZE,TYPE_ALIGN)  \
+  ((LIST)->anum++, __va_arg_leftadjusted(LIST,TYPE_SIZE,TYPE_ALIGN))
+#endif
 #endif
 #if defined(__x86_64__)
 /* the first 6 argument words are passed in registers */
@@ -828,7 +834,7 @@ typedef __va_alist* va_alist;
 #if defined(__i386__) || defined(__m68k__) || defined(__mipsn32__) || defined(__mips64__) || defined(__sparc__) || defined(__sparc64__) || defined(__alpha__) || defined(__arm__) || defined(__rs6000__) || defined(__convex__) || defined(__ia64__) || defined(__x86_64__) || defined(__s390__)
 #define __va_align_double(LIST)
 #endif
-#if defined(__mips__) && !defined(__mipsn32__) || defined(__m88k__)
+#if defined(__mips__) && !defined(__mipsn32__) && !defined(__mips64__) || defined(__m88k__)
 /* __VA_alignof(double) > sizeof(__vaword) */
 #define __va_align_double(LIST)  \
   (LIST)->aptr = ((LIST)->aptr + sizeof(double)-1) & -(long)sizeof(double),
@@ -893,7 +899,7 @@ typedef __va_alist* va_alist;
   ))
 #endif
 #endif
-#if defined(__mips__) && !defined(__mipsn32__)
+#if defined(__mips__) && !defined(__mipsn32__) && !defined(__mips64__)
 /* The first 0,1,2 registers are stored elsewhere if they are floating-point
  * parameters.
  */
@@ -1092,7 +1098,7 @@ typedef __va_alist* va_alist;
    __va_arg_adjusted(LIST,TYPE_SIZE,TYPE_ALIGN)                                
\
   )
 #endif
-#if defined(__mips__) && !defined(__mipsn32__)
+#if defined(__mips__) && !defined(__mipsn32__) && !defined(__mips64__)
 /* small structures < 1 word are adjusted depending on compiler */
 #define __va_arg_struct(LIST,TYPE_SIZE,TYPE_ALIGN)  \
  (__va_align_struct(LIST,TYPE_SIZE,TYPE_ALIGN)                                \
diff -uprN ffcall-1.10.orig/ffcall/m4/general.m4 ffcall-1.10/ffcall/m4/general.m4
--- ffcall-1.10.orig/ffcall/m4/general.m4       2002-08-02 11:56:46.000000000 +0000
+++ ffcall-1.10/ffcall/m4/general.m4    2005-03-20 19:59:13.000000000 +0000
@@ -150,7 +150,7 @@ changequote(,)dnl
     host_cpu=arm
     ;;
 changequote([,])dnl
-  mips )
+  mips* )
     AC_CACHE_CHECK([for 64-bit MIPS], cl_cv_host_mips64, [
 AC_EGREP_CPP(yes,
 [#if defined(_MIPS_SZLONG)
@@ -163,6 +163,8 @@ AC_EGREP_CPP(yes,
 ])
 if test $cl_cv_host_mips64 = yes; then
   host_cpu=mips64
+else
+  host_cpu=mips
 fi
     ;;
 dnl UltraSPARCs running Linux have `uname -m` = "sparc64", but the C compiler
@@ -213,7 +215,7 @@ changequote(,)dnl
     host_cpu=arm
     ;;
 changequote([,])dnl
-  mips )
+  mips* )
     AC_CACHE_CHECK([for 64-bit MIPS], cl_cv_host_mips64, [
 AC_EGREP_CPP(yes,
 [#if defined(_MIPS_SZLONG)
@@ -239,6 +241,8 @@ AC_EGREP_CPP(yes,
 ])
 if test $cl_cv_host_mipsn32 = yes; then
   host_cpu=mipsn32
+else
+  host_cpu=mips
 fi
 fi
     ;;
diff -uprN ffcall-1.10.orig/ffcall/vacall/vacall.h.in ffcall-1.10/ffcall/vacall/vacall.h.in
--- ffcall-1.10.orig/ffcall/vacall/vacall.h.in  2004-06-02 18:47:04.000000000 +0000
+++ ffcall-1.10/ffcall/vacall/vacall.h.in       2005-03-20 00:57:12.000000000 +0000
@@ -331,7 +331,7 @@ enum __VA_alist_flags
 #if defined(__i386__) || defined(__m68k__) || defined(__mipsn32__) || defined(__mips64__) || defined(__sparc64__) || defined(__alpha__) || defined(__arm__) || defined(__rs6000__) || defined(__convex__) || defined(__ia64__) || defined(__x86_64__) || defined(__s390__)
   __VA_REGISTER_STRUCT_RETURN  = 1<<10,
 #endif
-#if defined(__mips__) && !defined(__mipsn32__)
+#if defined(__mips__) && !defined(__mipsn32__) && !defined(__mips64__)
   __VA_FLOAT_1                 = 1<<11,
   __VA_FLOAT_2                 = 1<<12,
 #endif
@@ -395,7 +395,7 @@ typedef struct
   float          farg[4];
   double         darg[2];
 #endif
-#if defined(__mips__) && !defined(__mipsn32__)
+#if defined(__mips__) && !defined(__mipsn32__) && !defined(__mips64__)
   int            anum;
   float          farg[2];
   double         darg[2];
@@ -545,11 +545,11 @@ typedef __va_alist* va_alist;
 #define __va_start_struct1(LIST,TYPE_SIZE,TYPE_ALIGN,TYPE_SPLITTABLE)  \
   0
 #endif
-#if defined(__mips__) && !defined(__mipsn32__) || (defined(__sparc__) && !defined(__sparc64__)) || defined(__m88k__)
+#if defined(__mips__) && !defined(__mipsn32__) && !defined(__mips64__) || (defined(__sparc__) && !defined(__sparc64__)) || defined(__m88k__)
 #define __va_reg_struct_return(LIST,TYPE_SIZE,TYPE_SPLITTABLE)  \
   ((TYPE_SIZE) == 1 || (TYPE_SIZE) == 2 || (TYPE_SIZE) == 4)
 /* Test __VA_SMALL_STRUCT_RETURN instead of __VA_REGISTER_STRUCT_RETURN. */
-#if defined(__mips__) && !defined(__mipsn32__)
+#if defined(__mips__) && !defined(__mipsn32__) && !defined(__mips64__)
 #define __va_start_struct1(LIST,TYPE_SIZE,TYPE_ALIGN,TYPE_SPLITTABLE)  \
   ((LIST)->anum++,                                                     \
    0                                                                   \
@@ -651,7 +651,7 @@ typedef __va_alist* va_alist;
 /* Padding of non-struct arguments. */
 #define __va_argsize(TYPE_SIZE)  \
   (((TYPE_SIZE) + sizeof(__vaword)-1) & -(long)sizeof(__vaword))
-#if defined(__i386__) || defined(__m68k__) || defined(__mips__) && !defined(__mipsn32__) || (defined(__sparc__) && !defined(__sparc64__)) || defined(__alpha__) || defined(__arm__) || (defined(__rs6000__) && (defined(_AIX) || (defined(__MACH__) && defined(__APPLE__)))) || defined(__m88k__) || defined(__convex__) || defined(__ia64__) || defined(__x86_64__)
+#if defined(__i386__) || defined(__m68k__) || defined(__mips__) && !defined(__mipsn32__) && !defined(__mips64__) || (defined(__sparc__) && !defined(__sparc64__)) || defined(__alpha__) || defined(__arm__) || (defined(__rs6000__) && (defined(_AIX) || (defined(__MACH__) && defined(__APPLE__)))) || defined(__m88k__) || defined(__convex__) || defined(__ia64__) || defined(__x86_64__)
 /* args grow up */
 /* small structures < 1 word are adjusted depending on compiler */
 #define __va_arg_leftadjusted(LIST,TYPE_SIZE,TYPE_ALIGN)  \
@@ -747,7 +747,7 @@ typedef __va_alist* va_alist;
 #define __va_arg_adjusted(LIST,TYPE_SIZE,TYPE_ALIGN)  \
   __va_arg_rightadjusted(LIST,TYPE_SIZE,TYPE_ALIGN)
 #endif
-#if defined(__mips__) && !defined(__mipsn32__)
+#if defined(__mips__) && !defined(__mipsn32__) && !defined(__mips64__)
 /* big endian -> small args < 1 word are adjusted to the right */
 #define __va_arg_adjusted(LIST,TYPE_SIZE,TYPE_ALIGN)  \
   ((LIST)->anum++, __va_arg_rightadjusted(LIST,TYPE_SIZE,TYPE_ALIGN))
@@ -828,7 +828,7 @@ typedef __va_alist* va_alist;
 #if defined(__i386__) || defined(__m68k__) || defined(__mipsn32__) || defined(__mips64__) || defined(__sparc__) || defined(__sparc64__) || defined(__alpha__) || defined(__arm__) || defined(__rs6000__) || defined(__convex__) || defined(__ia64__) || defined(__x86_64__) || defined(__s390__)
 #define __va_align_double(LIST)
 #endif
-#if defined(__mips__) && !defined(__mipsn32__) || defined(__m88k__)
+#if defined(__mips__) && !defined(__mipsn32__) && !defined(__mips64__) || defined(__m88k__)
 /* __VA_alignof(double) > sizeof(__vaword) */
 #define __va_align_double(LIST)  \
   (LIST)->aptr = ((LIST)->aptr + sizeof(double)-1) & -(long)sizeof(double),
@@ -893,7 +893,7 @@ typedef __va_alist* va_alist;
   ))
 #endif
 #endif
-#if defined(__mips__) && !defined(__mipsn32__)
+#if defined(__mips__) && !defined(__mipsn32__) && !defined(__mips64__)
 /* The first 0,1,2 registers are stored elsewhere if they are floating-point
  * parameters.
  */
@@ -1092,7 +1092,7 @@ typedef __va_alist* va_alist;
   __va_arg_adjusted(LIST,TYPE_SIZE,TYPE_ALIGN)                                \
   )
 #endif
-#if defined(__mips__) && !defined(__mipsn32__)
+#if defined(__mips__) && !defined(__mipsn32__) && !defined(__mips64__)
 /* small structures < 1 word are adjusted depending on compiler */
 #define __va_arg_struct(LIST,TYPE_SIZE,TYPE_ALIGN)  \
  (__va_align_struct(LIST,TYPE_SIZE,TYPE_ALIGN)                                \

