On Wed, 2008-01-23 12:09:10 +0100, Jan-Benedict Glaw <[EMAIL PROTECTED]> wrote: > I do have a crude patch to ELFify the assembly parts. However,
This was meant to be attached... MfG, JBG -- Jan-Benedict Glaw [EMAIL PROTECTED] +49-172-7608481 Signature of: Ich hatte in letzter Zeit ein bißchen viel Realitycheck. the second : Langsam möchte ich mal wieder weiterträumen können. -- Maximilian Wilhelm (18. Mai 2006, #lug-owl.de)
--- gmp-4.2.1/mpn/vax/addmul_1.s~ 2006-10-24 21:53:39.000000000 +0200 +++ gmp-4.2.1/mpn/vax/addmul_1.s 2006-10-25 08:25:24.000000000 +0200 @@ -1,7 +1,7 @@ # VAX __gmpn_addmul_1 -- Multiply a limb vector with a limb and add # the result to a second limb vector. -# Copyright 1992, 1994, 1996, 2000 Free Software Foundation, Inc. +# Copyright 1992, 1994, 1996, 2000, 2006 Free Software Foundation, Inc. # This file is part of the GNU MP Library. @@ -29,98 +29,99 @@ .text .align 1 -.globl ___gmpn_addmul_1 -___gmpn_addmul_1: +.globl __gmpn_addmul_1 +.type __gmpn_addmul_1, @function +__gmpn_addmul_1: .word 0xfc0 - movl 12(ap),r4 - movl 8(ap),r8 - movl 4(ap),r9 - movl 16(ap),r6 + movl 12(%ap),%r4 + movl 8(%ap),%r8 + movl 4(%ap),%r9 + movl 16(%ap),%r6 jlss s2_big - clrl r3 - incl r4 - ashl $-1,r4,r7 - jlbc r4,L1 - clrl r11 + clrl %r3 + incl %r4 + ashl $-1,%r4,%r7 + jlbc %r4,L1 + clrl %r11 # Loop for S2_LIMB < 0x80000000 -Loop1: movl (r8)+,r1 +Loop1: movl (%r8)+,%r1 jlss L1n0 - emul r1,r6,$0,r2 - addl2 r11,r2 - adwc $0,r3 - addl2 r2,(r9)+ - adwc $0,r3 -L1: movl (r8)+,r1 + emul %r1,%r6,$0,%r2 + addl2 %r11,%r2 + adwc $0,%r3 + addl2 %r2,(%r9)+ + adwc $0,%r3 +L1: movl (%r8)+,%r1 jlss L1n1 -L1p1: emul r1,r6,$0,r10 - addl2 r3,r10 - adwc $0,r11 - addl2 r10,(r9)+ - adwc $0,r11 +L1p1: emul %r1,%r6,$0,%r10 + addl2 %r3,%r10 + adwc $0,%r11 + addl2 %r10,(%r9)+ + adwc $0,%r11 - sobgtr r7,Loop1 - movl r11,r0 + sobgtr %r7,Loop1 + movl %r11,%r0 ret -L1n0: emul r1,r6,$0,r2 - addl2 r11,r2 - adwc r6,r3 - addl2 r2,(r9)+ - adwc $0,r3 - movl (r8)+,r1 +L1n0: emul %r1,%r6,$0,%r2 + addl2 %r11,%r2 + adwc %r6,%r3 + addl2 %r2,(%r9)+ + adwc $0,%r3 + movl (%r8)+,%r1 jgeq L1p1 -L1n1: emul r1,r6,$0,r10 - addl2 r3,r10 - adwc r6,r11 - addl2 r10,(r9)+ - adwc $0,r11 +L1n1: emul %r1,%r6,$0,%r10 + addl2 %r3,%r10 + adwc %r6,%r11 + addl2 %r10,(%r9)+ + adwc $0,%r11 - sobgtr r7,Loop1 - movl r11,r0 + sobgtr %r7,Loop1 + movl %r11,%r0 ret -s2_big: clrl r3 - incl r4 - ashl $-1,r4,r7 - jlbc r4,L2 - clrl r11 +s2_big: clrl 
%r3 + incl %r4 + ashl $-1,%r4,%r7 + jlbc %r4,L2 + clrl %r11 # Loop for S2_LIMB >= 0x80000000 -Loop2: movl (r8)+,r1 +Loop2: movl (%r8)+,%r1 jlss L2n0 - emul r1,r6,$0,r2 - addl2 r11,r2 - adwc r1,r3 - addl2 r2,(r9)+ - adwc $0,r3 -L2: movl (r8)+,r1 + emul %r1,%r6,$0,%r2 + addl2 %r11,%r2 + adwc %r1,%r3 + addl2 %r2,(%r9)+ + adwc $0,%r3 +L2: movl (%r8)+,%r1 jlss L2n1 -L2p1: emul r1,r6,$0,r10 - addl2 r3,r10 - adwc r1,r11 - addl2 r10,(r9)+ - adwc $0,r11 +L2p1: emul %r1,%r6,$0,%r10 + addl2 %r3,%r10 + adwc %r1,%r11 + addl2 %r10,(%r9)+ + adwc $0,%r11 - sobgtr r7,Loop2 - movl r11,r0 + sobgtr %r7,Loop2 + movl %r11,%r0 ret -L2n0: emul r1,r6,$0,r2 - addl2 r11,r2 - adwc r6,r3 - addl2 r2,(r9)+ - adwc r1,r3 - movl (r8)+,r1 +L2n0: emul %r1,%r6,$0,%r2 + addl2 %r11,%r2 + adwc %r6,%r3 + addl2 %r2,(%r9)+ + adwc %r1,%r3 + movl (%r8)+,%r1 jgeq L2p1 -L2n1: emul r1,r6,$0,r10 - addl2 r3,r10 - adwc r6,r11 - addl2 r10,(r9)+ - adwc r1,r11 +L2n1: emul %r1,%r6,$0,%r10 + addl2 %r3,%r10 + adwc %r6,%r11 + addl2 %r10,(%r9)+ + adwc %r1,%r11 - sobgtr r7,Loop2 - movl r11,r0 + sobgtr %r7,Loop2 + movl %r11,%r0 ret --- gmp-4.2.1/mpn/vax/add_n.s~ 2006-10-24 21:48:09.000000000 +0200 +++ gmp-4.2.1/mpn/vax/add_n.s 2006-10-25 08:33:00.000000000 +0200 @@ -1,7 +1,7 @@ # VAX __gmpn_add_n -- Add two limb vectors of the same length > 0 and store # sum in a third limb vector. -# Copyright 1999, 2000 Free Software Foundation, Inc. +# Copyright 1999, 2000, 2006 Free Software Foundation, Inc. # This file is part of the GNU MP Library. 
@@ -29,33 +29,34 @@ .text .align 1 -.globl ___gmpn_add_n -___gmpn_add_n: +.globl __gmpn_add_n +.type __gmpn_add_n, @function +__gmpn_add_n: .word 0x0 - movl 16(ap),r0 - movl 12(ap),r1 - movl 8(ap),r2 - movl 4(ap),r3 - mnegl r0,r5 - addl2 $3,r0 - ashl $-2,r0,r0 # unroll loop count - bicl2 $-4,r5 # mask out low 2 bits - movaq (r5)[r5],r5 # 9x - jmp Loop(r5) - -Loop: movl (r2)+,r4 - adwc (r1)+,r4 - movl r4,(r3)+ - movl (r2)+,r4 - adwc (r1)+,r4 - movl r4,(r3)+ - movl (r2)+,r4 - adwc (r1)+,r4 - movl r4,(r3)+ - movl (r2)+,r4 - adwc (r1)+,r4 - movl r4,(r3)+ - sobgtr r0,Loop + movl 16(%ap),%r0 + movl 12(%ap),%r1 + movl 8(%ap),%r2 + movl 4(%ap),%r3 + mnegl %r0,%r5 + addl2 $3,%r0 + ashl $-2,%r0,%r0 # unroll loop count + bicl2 $-4,%r5 # mask out low 2 bits + movaq (%r5)[%r5],%r5 # 9x + jmp Loop(%r5) + +Loop: movl (%r2)+,%r4 + adwc (%r1)+,%r4 + movl %r4,(%r3)+ + movl (%r2)+,%r4 + adwc (%r1)+,%r4 + movl %r4,(%r3)+ + movl (%r2)+,%r4 + adwc (%r1)+,%r4 + movl %r4,(%r3)+ + movl (%r2)+,%r4 + adwc (%r1)+,%r4 + movl %r4,(%r3)+ + sobgtr %r0,Loop - adwc r0,r0 + adwc %r0,%r0 ret --- gmp-4.2.1/mpn/vax/lshift.s~ 2006-10-24 21:59:29.000000000 +0200 +++ gmp-4.2.1/mpn/vax/lshift.s 2006-10-25 08:27:31.000000000 +0200 @@ -1,6 +1,6 @@ # VAX mpn_lshift -- left shift. -# Copyright 1999, 2000, 2001 Free Software Foundation, Inc. +# Copyright 1999, 2000, 2001, 2006 Free Software Foundation, Inc. # This file is part of the GNU MP Library. 
@@ -30,29 +30,30 @@ .text .align 1 -.globl ___gmpn_lshift -___gmpn_lshift: +.globl __gmpn_lshift +.type __gmpn_lshift, @function +__gmpn_lshift: .word 0x1c0 - movl 4(ap),r7 - movl 8(ap),r6 - movl 12(ap),r1 - movl 16(ap),r8 - - moval (r6)[r1],r6 - moval (r7)[r1],r7 - clrl r3 - movl -(r6),r2 - ashq r8,r2,r4 - movl r5,r0 - movl r2,r3 - decl r1 + movl 4(%ap),%r7 + movl 8(%ap),%r6 + movl 12(%ap),%r1 + movl 16(%ap),%r8 + + moval (%r6)[%r1],%r6 + moval (%r7)[%r1],%r7 + clrl %r3 + movl -(%r6),%r2 + ashq %r8,%r2,%r4 + movl %r5,%r0 + movl %r2,%r3 + decl %r1 jeql Lend -Loop: movl -(r6),r2 - ashq r8,r2,r4 - movl r5,-(r7) - movl r2,r3 - sobgtr r1,Loop +Loop: movl -(%r6),%r2 + ashq %r8,%r2,%r4 + movl %r5,-(%r7) + movl %r2,%r3 + sobgtr %r1,Loop -Lend: movl r4,-4(r7) +Lend: movl %r4,-4(%r7) ret --- gmp-4.2.1/mpn/vax/mul_1.s~ 2006-10-24 21:50:43.000000000 +0200 +++ gmp-4.2.1/mpn/vax/mul_1.s 2006-10-25 08:27:44.000000000 +0200 @@ -1,7 +1,7 @@ # VAX __gmpn_mul_1 -- Multiply a limb vector with a limb and store # the result in a second limb vector. -# Copyright 1992, 1994, 1996, 2000 Free Software Foundation, Inc. +# Copyright 1992, 1994, 1996, 2000, 2006 Free Software Foundation, Inc. # This file is part of the GNU MP Library. @@ -29,95 +29,96 @@ .text .align 1 -.globl ___gmpn_mul_1 -___gmpn_mul_1: +.globl __gmpn_mul_1 +.type __gmpn_mul_1, @function +__gmpn_mul_1: .word 0xfc0 - movl 12(ap),r4 - movl 8(ap),r8 - movl 4(ap),r9 - movl 16(ap),r6 + movl 12(%ap),%r4 + movl 8(%ap),%r8 + movl 4(%ap),%r9 + movl 16(%ap),%r6 jlss s2_big # One might want to combine the addl2 and the store below, but that # is actually just slower according to my timing tests. 
(VAX 3600) - clrl r3 - incl r4 - ashl $-1,r4,r7 - jlbc r4,L1 - clrl r11 + clrl %r3 + incl %r4 + ashl $-1,%r4,%r7 + jlbc %r4,L1 + clrl %r11 # Loop for S2_LIMB < 0x80000000 -Loop1: movl (r8)+,r1 +Loop1: movl (%r8)+,%r1 jlss L1n0 - emul r1,r6,$0,r2 - addl2 r11,r2 - adwc $0,r3 - movl r2,(r9)+ -L1: movl (r8)+,r1 + emul %r1,%r6,$0,%r2 + addl2 %r11,%r2 + adwc $0,%r3 + movl %r2,(%r9)+ +L1: movl (%r8)+,%r1 jlss L1n1 -L1p1: emul r1,r6,$0,r10 - addl2 r3,r10 - adwc $0,r11 - movl r10,(r9)+ +L1p1: emul %r1,%r6,$0,%r10 + addl2 %r3,%r10 + adwc $0,%r11 + movl %r10,(%r9)+ - sobgtr r7,Loop1 - movl r11,r0 + sobgtr %r7,Loop1 + movl %r11,%r0 ret -L1n0: emul r1,r6,$0,r2 - addl2 r11,r2 - adwc r6,r3 - movl r2,(r9)+ - movl (r8)+,r1 +L1n0: emul %r1,%r6,$0,%r2 + addl2 %r11,%r2 + adwc %r6,%r3 + movl %r2,(%r9)+ + movl (%r8)+,%r1 jgeq L1p1 -L1n1: emul r1,r6,$0,r10 - addl2 r3,r10 - adwc r6,r11 - movl r10,(r9)+ +L1n1: emul %r1,%r6,$0,%r10 + addl2 %r3,%r10 + adwc %r6,%r11 + movl %r10,(%r9)+ - sobgtr r7,Loop1 - movl r11,r0 + sobgtr %r7,Loop1 + movl %r11,%r0 ret -s2_big: clrl r3 - incl r4 - ashl $-1,r4,r7 - jlbc r4,L2 - clrl r11 +s2_big: clrl %r3 + incl %r4 + ashl $-1,%r4,%r7 + jlbc %r4,L2 + clrl %r11 # Loop for S2_LIMB >= 0x80000000 -Loop2: movl (r8)+,r1 +Loop2: movl (%r8)+,%r1 jlss L2n0 - emul r1,r6,$0,r2 - addl2 r11,r2 - adwc r1,r3 - movl r2,(r9)+ -L2: movl (r8)+,r1 + emul %r1,%r6,$0,%r2 + addl2 %r11,%r2 + adwc %r1,%r3 + movl %r2,(%r9)+ +L2: movl (%r8)+,%r1 jlss L2n1 -L2p1: emul r1,r6,$0,r10 - addl2 r3,r10 - adwc r1,r11 - movl r10,(r9)+ +L2p1: emul %r1,%r6,$0,%r10 + addl2 %r3,%r10 + adwc %r1,%r11 + movl %r10,(%r9)+ - sobgtr r7,Loop2 - movl r11,r0 + sobgtr %r7,Loop2 + movl %r11,%r0 ret -L2n0: emul r1,r6,$0,r2 - addl2 r1,r3 - addl2 r11,r2 - adwc r6,r3 - movl r2,(r9)+ - movl (r8)+,r1 +L2n0: emul %r1,%r6,$0,%r2 + addl2 %r1,%r3 + addl2 %r11,%r2 + adwc %r6,%r3 + movl %r2,(%r9)+ + movl (%r8)+,%r1 jgeq L2p1 -L2n1: emul r1,r6,$0,r10 - addl2 r1,r11 - addl2 r3,r10 - adwc r6,r11 - movl r10,(r9)+ +L2n1: emul 
%r1,%r6,$0,%r10 + addl2 %r1,%r11 + addl2 %r3,%r10 + adwc %r6,%r11 + movl %r10,(%r9)+ - sobgtr r7,Loop2 - movl r11,r0 + sobgtr %r7,Loop2 + movl %r11,%r0 ret --- gmp-4.2.1/mpn/vax/rshift.s~ 2006-10-24 22:00:15.000000000 +0200 +++ gmp-4.2.1/mpn/vax/rshift.s 2006-10-25 08:26:29.000000000 +0200 @@ -1,6 +1,6 @@ # VAX mpn_rshift -- right shift. -# Copyright 1999, 2000, 2001 Free Software Foundation, Inc. +# Copyright 1999, 2000, 2001, 2006 Free Software Foundation, Inc. # This file is part of the GNU MP Library. @@ -30,27 +30,28 @@ .text .align 1 -.globl ___gmpn_rshift -___gmpn_rshift: +.globl __gmpn_rshift +.type __gmpn_rshift, @function +__gmpn_rshift: .word 0x1c0 - movl 4(ap),r7 - movl 8(ap),r6 - movl 12(ap),r1 - movl 16(ap),r8 - - movl (r6)+,r2 - subl3 r8,$32,r8 - ashl r8,r2,r0 - decl r1 + movl 4(%ap),%r7 + movl 8(%ap),%r6 + movl 12(%ap),%r1 + movl 16(%ap),%r8 + + movl (%r6)+,%r2 + subl3 %r8,$32,%r8 + ashl %r8,%r2,%r0 + decl %r1 jeql Lend -Loop: movl (r6)+,r3 - ashq r8,r2,r4 - movl r5,(r7)+ - movl r3,r2 - sobgtr r1,Loop - -Lend: clrl r3 - ashq r8,r2,r4 - movl r5,(r7) +Loop: movl (%r6)+,%r3 + ashq %r8,%r2,%r4 + movl %r5,(%r7)+ + movl %r3,%r2 + sobgtr %r1,Loop + +Lend: clrl %r3 + ashq %r8,%r2,%r4 + movl %r5,(%r7) ret --- gmp-4.2.1/mpn/vax/submul_1.s~ 2006-10-24 21:56:17.000000000 +0200 +++ gmp-4.2.1/mpn/vax/submul_1.s 2006-10-25 08:26:46.000000000 +0200 @@ -1,7 +1,7 @@ # VAX __gmpn_submul_1 -- Multiply a limb vector with a limb and subtract # the result from a second limb vector. -# Copyright 1992, 1994, 1996, 2000 Free Software Foundation, Inc. +# Copyright 1992, 1994, 1996, 2000, 2006 Free Software Foundation, Inc. # This file is part of the GNU MP Library. 
@@ -29,98 +29,99 @@ .text .align 1 -.globl ___gmpn_submul_1 -___gmpn_submul_1: +.globl __gmpn_submul_1 +.type __gmpn_submul_1, @function +__gmpn_submul_1: .word 0xfc0 - movl 12(ap),r4 - movl 8(ap),r8 - movl 4(ap),r9 - movl 16(ap),r6 + movl 12(%ap),%r4 + movl 8(%ap),%r8 + movl 4(%ap),%r9 + movl 16(%ap),%r6 jlss s2_big - clrl r3 - incl r4 - ashl $-1,r4,r7 - jlbc r4,L1 - clrl r11 + clrl %r3 + incl %r4 + ashl $-1,%r4,%r7 + jlbc %r4,L1 + clrl %r11 # Loop for S2_LIMB < 0x80000000 -Loop1: movl (r8)+,r1 +Loop1: movl (%r8)+,%r1 jlss L1n0 - emul r1,r6,$0,r2 - addl2 r11,r2 - adwc $0,r3 - subl2 r2,(r9)+ - adwc $0,r3 -L1: movl (r8)+,r1 + emul %r1,%r6,$0,%r2 + addl2 %r11,%r2 + adwc $0,%r3 + subl2 %r2,(%r9)+ + adwc $0,%r3 +L1: movl (%r8)+,%r1 jlss L1n1 -L1p1: emul r1,r6,$0,r10 - addl2 r3,r10 - adwc $0,r11 - subl2 r10,(r9)+ - adwc $0,r11 +L1p1: emul %r1,%r6,$0,%r10 + addl2 %r3,%r10 + adwc $0,%r11 + subl2 %r10,(%r9)+ + adwc $0,%r11 - sobgtr r7,Loop1 - movl r11,r0 + sobgtr %r7,Loop1 + movl %r11,%r0 ret -L1n0: emul r1,r6,$0,r2 - addl2 r11,r2 - adwc r6,r3 - subl2 r2,(r9)+ - adwc $0,r3 - movl (r8)+,r1 +L1n0: emul %r1,%r6,$0,%r2 + addl2 %r11,%r2 + adwc %r6,%r3 + subl2 %r2,(%r9)+ + adwc $0,%r3 + movl (%r8)+,%r1 jgeq L1p1 -L1n1: emul r1,r6,$0,r10 - addl2 r3,r10 - adwc r6,r11 - subl2 r10,(r9)+ - adwc $0,r11 +L1n1: emul %r1,%r6,$0,%r10 + addl2 %r3,%r10 + adwc %r6,%r11 + subl2 %r10,(%r9)+ + adwc $0,%r11 - sobgtr r7,Loop1 - movl r11,r0 + sobgtr %r7,Loop1 + movl %r11,%r0 ret -s2_big: clrl r3 - incl r4 - ashl $-1,r4,r7 - jlbc r4,L2 - clrl r11 +s2_big: clrl %r3 + incl %r4 + ashl $-1,%r4,%r7 + jlbc %r4,L2 + clrl %r11 # Loop for S2_LIMB >= 0x80000000 -Loop2: movl (r8)+,r1 +Loop2: movl (%r8)+,%r1 jlss L2n0 - emul r1,r6,$0,r2 - addl2 r11,r2 - adwc r1,r3 - subl2 r2,(r9)+ - adwc $0,r3 -L2: movl (r8)+,r1 + emul %r1,%r6,$0,%r2 + addl2 %r11,%r2 + adwc %r1,%r3 + subl2 %r2,(%r9)+ + adwc $0,%r3 +L2: movl (%r8)+,%r1 jlss L2n1 -L2p1: emul r1,r6,$0,r10 - addl2 r3,r10 - adwc r1,r11 - subl2 r10,(r9)+ - adwc 
$0,r11 +L2p1: emul %r1,%r6,$0,%r10 + addl2 %r3,%r10 + adwc %r1,%r11 + subl2 %r10,(%r9)+ + adwc $0,%r11 - sobgtr r7,Loop2 - movl r11,r0 + sobgtr %r7,Loop2 + movl %r11,%r0 ret -L2n0: emul r1,r6,$0,r2 - addl2 r11,r2 - adwc r6,r3 - subl2 r2,(r9)+ - adwc r1,r3 - movl (r8)+,r1 +L2n0: emul %r1,%r6,$0,%r2 + addl2 %r11,%r2 + adwc %r6,%r3 + subl2 %r2,(%r9)+ + adwc %r1,%r3 + movl (%r8)+,%r1 jgeq L2p1 -L2n1: emul r1,r6,$0,r10 - addl2 r3,r10 - adwc r6,r11 - subl2 r10,(r9)+ - adwc r1,r11 +L2n1: emul %r1,%r6,$0,%r10 + addl2 %r3,%r10 + adwc %r6,%r11 + subl2 %r10,(%r9)+ + adwc %r1,%r11 - sobgtr r7,Loop2 - movl r11,r0 + sobgtr %r7,Loop2 + movl %r11,%r0 ret --- gmp-4.2.1/mpn/vax/sub_n.s~ 2006-10-24 21:49:24.000000000 +0200 +++ gmp-4.2.1/mpn/vax/sub_n.s 2006-10-25 08:27:08.000000000 +0200 @@ -1,7 +1,7 @@ # VAX __gmpn_sub_n -- Subtract two limb vectors of the same length > 0 and store # difference in a third limb vector. -# Copyright 1999, 2000 Free Software Foundation, Inc. +# Copyright 1999, 2000, 2006 Free Software Foundation, Inc. # This file is part of the GNU MP Library. 
@@ -29,33 +29,34 @@ .text .align 1 -.globl ___gmpn_sub_n -___gmpn_sub_n: +.globl __gmpn_sub_n +.type __gmpn_sub_n, @function +__gmpn_sub_n: .word 0x0 - movl 16(ap),r0 - movl 12(ap),r1 - movl 8(ap),r2 - movl 4(ap),r3 - mnegl r0,r5 - addl2 $3,r0 - ashl $-2,r0,r0 # unroll loop count - bicl2 $-4,r5 # mask out low 2 bits - movaq (r5)[r5],r5 # 9x - jmp Loop(r5) - -Loop: movl (r2)+,r4 - sbwc (r1)+,r4 - movl r4,(r3)+ - movl (r2)+,r4 - sbwc (r1)+,r4 - movl r4,(r3)+ - movl (r2)+,r4 - sbwc (r1)+,r4 - movl r4,(r3)+ - movl (r2)+,r4 - sbwc (r1)+,r4 - movl r4,(r3)+ - sobgtr r0,Loop + movl 16(%ap),%r0 + movl 12(%ap),%r1 + movl 8(%ap),%r2 + movl 4(%ap),%r3 + mnegl %r0,%r5 + addl2 $3,%r0 + ashl $-2,%r0,%r0 # unroll loop count + bicl2 $-4,%r5 # mask out low 2 bits + movaq (%r5)[%r5],%r5 # 9x + jmp Loop(%r5) + +Loop: movl (%r2)+,%r4 + sbwc (%r1)+,%r4 + movl %r4,(%r3)+ + movl (%r2)+,%r4 + sbwc (%r1)+,%r4 + movl %r4,(%r3)+ + movl (%r2)+,%r4 + sbwc (%r1)+,%r4 + movl %r4,(%r3)+ + movl (%r2)+,%r4 + sbwc (%r1)+,%r4 + movl %r4,(%r3)+ + sobgtr %r0,Loop - adwc r0,r0 + adwc %r0,%r0 ret
signature.asc
Description: Digital signature