Hi Guys,

  I am applying the patch below to extend the RL78 assembly multiply
  and divide routines in libgcc to support the G10 architecture, so
  that G10 builds no longer fall back to the generic C versions.
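
  The G10 parts have only register bank 0, so the "sel rb1"/"sel rb2"
  register-bank tricks used by the existing routines are not available
  there; the G10 paths instead save and restore the memory-mapped
  virtual registers (r16, r18, etc. from vregs.h) with push/pop.  The
  entry save sequence, condensed from the ___mulsi3 hunk below, looks
  roughly like this:

	;; Condensed sketch of the entry save sequence (see the
	;; ___mulsi3 hunk below for the real code).
#ifdef __RL78_G10__
	movw	ax, r16		; only bank 0 exists on the G10
	push	ax
	movw	ax, r18
	push	ax
#else
	sel	rb2		; other parts use register bank 2 as scratch
	push	ax
	push	bc
	sel	rb0
#endif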

Cheers
  Nick

libgcc/ChangeLog
2014-11-13  Nick Clifton  <ni...@redhat.com>

        * config/rl78/divmodhi.S: Add support for the G10 architecture.
        Use START_FUNC and END_FUNC macros to enable linker garbage
        collection.
        * config/rl78/divmodqi.S: Likewise.
        * config/rl78/divmodsi.S: Likewise.
        * config/rl78/mulsi3.S: Likewise.
        * config/rl78/lib2div.c: Remove G10 functions.
        * config/rl78/lib2mul.c: Likewise.
        * config/rl78/t-rl78 (HOST_LIBGCC2_CFLAGS): Define.
        * config/rl78/vregs.h (START_FUNC): New macro.
        (END_FUNC): New macro.
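
  The new START_FUNC/END_FUNC macros (see the vregs.h hunk at the end
  of the patch) place each routine in its own .text.<name> section, so
  that, together with the -ffunction-sections/-fdata-sections flags now
  added in t-rl78, any routine the application never references can be
  discarded - assuming the final link is performed with the linker's
  --gc-sections option.  Roughly, "START_FUNC ___udivhi3 ... END_FUNC
  ___udivhi3" expands to:

	.pushsection .text.___udivhi3,"ax",@progbits
	.global	___udivhi3
	.type	___udivhi3, @function
___udivhi3:
	;; function body
	.size	___udivhi3, . - ___udivhi3
	.popsection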

Index: libgcc/config/rl78/divmodhi.S
===================================================================
--- libgcc/config/rl78/divmodhi.S	(revision 217462)
+++ libgcc/config/rl78/divmodhi.S	(working copy)
@@ -23,11 +23,9 @@
    see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
    <http://www.gnu.org/licenses/>.  */
 
-#ifndef __RL78_G10__
-
 #include "vregs.h"
 
-	.macro make_generic  which,need_result
+.macro MAKE_GENERIC  which,need_result
 
 	.if \need_result
 	quot = r8
@@ -53,12 +51,12 @@
 	bitB0 = bit
 	bitB1 = bit+1
 
-#if 1
 #define bit	bc
 #define bitB0	c
 #define bitB1	b
-#endif
 
+	START_FUNC __generic_hidivmod\which
+
 num_lt_den\which:
 	.if \need_result
 	movw	r8, #0
@@ -72,12 +70,13 @@
 	;; to store sign information that must remain intact
 
 	.if \need_result
+	.global __generic_hidiv
+__generic_hidiv:
 
-generic_div:
-
 	.else
 
-generic_mod:
+	.global __generic_himod
+__generic_himod:
 
 	.endif
 
@@ -193,42 +192,40 @@
 
 main_loop_done\which:	
 	ret
-	.endm
+	END_FUNC __generic_hidivmod\which
+.endm
+;----------------------------------------------------------------------
 
-	make_generic _d 1
-	make_generic _m 0
+	MAKE_GENERIC _d 1
+	MAKE_GENERIC _m 0
 
 ;----------------------------------------------------------------------
 
-	.global	___udivhi3
-	.type	___udivhi3,@function
-___udivhi3:
+START_FUNC ___udivhi3
 	;; r8 = 4[sp] / 6[sp]
-	call	$!generic_div
+	call	$!__generic_hidiv
 	ret
-	.size	___udivhi3, . - ___udivhi3
+END_FUNC ___udivhi3
 	
 
-	.global	___umodhi3
-	.type	___umodhi3,@function
-___umodhi3:
+START_FUNC ___umodhi3
 	;; r8 = 4[sp] % 6[sp]
-	call	$!generic_mod
+	call	$!__generic_himod
 	ret
-	.size	___umodhi3, . - ___umodhi3
+END_FUNC ___umodhi3
 
 ;----------------------------------------------------------------------
 
-	.macro neg_ax
+.macro NEG_AX
 	movw	hl, ax
 	movw	ax, #0
 	subw	ax, [hl]
 	movw	[hl], ax
-	.endm
+.endm
 
-	.global	___divhi3
-	.type	___divhi3,@function
-___divhi3:
+;----------------------------------------------------------------------
+
+START_FUNC ___divhi3
 	;; r8 = 4[sp] / 6[sp]
 	movw	de, #0
 	mov	a, [sp+5]
@@ -237,7 +234,7 @@
 	mov	a, [sp+7]
 	mov1	cy, a.7
 	bc	$div_signed_den
-	call	$!generic_div
+	call	$!__generic_hidiv
 	ret
 	
 div_signed_num:
@@ -244,7 +241,7 @@
 	;; neg [sp+4]
 	movw	ax, sp
 	addw	ax, #4
-	neg_ax
+	NEG_AX
 	mov	d, #1
 	mov	a, [sp+7]
 	mov1	cy, a.7
@@ -253,10 +250,10 @@
 	;; neg [sp+6]
 	movw	ax, sp
 	addw	ax, #6
-	neg_ax
+	NEG_AX
 	mov	e, #1
 div_unsigned_den:	
-	call	$!generic_div
+	call	$!__generic_hidiv
 
 	mov	a, d
 	cmp0	a
@@ -264,13 +261,13 @@
 	;;  We have to restore the numerator [sp+4]
 	movw	ax, sp
 	addw	ax, #4
-	neg_ax
+	NEG_AX
 	mov	a, d
 div_skip_restore_num:	
 	xor	a, e
 	bz	$div_no_neg
 	movw	ax, #r8
-	neg_ax
+	NEG_AX
 div_no_neg:
 	mov	a, e
 	cmp0	a
@@ -277,15 +274,13 @@
 	bz	$div_skip_restore_den
 	movw	ax, sp
 	addw	ax, #6
-	neg_ax
+	NEG_AX
 div_skip_restore_den:	
 	ret
-	.size	___divhi3, . - ___divhi3
+END_FUNC ___divhi3
 	
 
-	.global	___modhi3
-	.type	___modhi3,@function
-___modhi3:
+START_FUNC ___modhi3
 	;; r8 = 4[sp] % 6[sp]
 	movw	de, #0
 	mov	a, [sp+5]
@@ -294,7 +289,7 @@
 	mov	a, [sp+7]
 	mov1	cy, a.7
 	bc	$mod_signed_den
-	call	$!generic_mod
+	call	$!__generic_himod
 	ret
 	
 mod_signed_num:
@@ -301,7 +296,7 @@
 	;; neg [sp+4]
 	movw	ax, sp
 	addw	ax, #4
-	neg_ax
+	NEG_AX
 	mov	d, #1
 	mov	a, [sp+7]
 	mov1	cy, a.7
@@ -310,19 +305,19 @@
 	;; neg [sp+6]
 	movw	ax, sp
 	addw	ax, #6
-	neg_ax
+	NEG_AX
 mod_unsigned_den:	
-	call	$!generic_mod
+	call	$!__generic_himod
 
 	mov	a, d
 	cmp0	a
 	bz	$mod_no_neg
 	movw	ax, #r8
-	neg_ax
+	NEG_AX
 	;;  Also restore numerator
 	movw 	ax, sp
 	addw	ax, #4
-	neg_ax
+	NEG_AX
 mod_no_neg:
 	mov	a, e
 	cmp0	a
@@ -329,9 +324,7 @@
 	bz	$mod_skip_restore_den
 	movw	ax, sp
 	addw	ax, #6
-	neg_ax
+	NEG_AX
 mod_skip_restore_den:	
 	ret
-	.size	___modhi3, . - ___modhi3
-
-#endif
+END_FUNC ___modhi3
Index: libgcc/config/rl78/divmodqi.S
===================================================================
--- libgcc/config/rl78/divmodqi.S	(revision 217462)
+++ libgcc/config/rl78/divmodqi.S	(working copy)
@@ -23,11 +23,9 @@
    see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
    <http://www.gnu.org/licenses/>.  */
 
-#ifndef __RL78_G10__
-
 #include "vregs.h"
 
-	.macro make_generic  which,need_result
+.macro MAKE_GENERIC  which,need_result
 
 	.if \need_result
 	quot = r8
@@ -41,12 +39,12 @@
 	bit = r14
 	.endif
 
-#if 1
 #define bit	b
 #define den	c
 #define bitden	bc
-#endif
 
+	START_FUNC __generic_qidivmod\which
+
 num_lt_den\which:
 	.if \need_result
 	mov	r8, #0
@@ -72,12 +70,13 @@
 	;; to store sign information that must remain intact
 
 	.if \need_result
+	.global __generic_qidiv
+__generic_qidiv:
 
-generic_div:
-
 	.else
 
-generic_mod:
+	.global __generic_qimod
+__generic_qimod:
 
 	.endif
 
@@ -111,7 +110,8 @@
 ; while (den < num && !(den & (1L << BITS_MINUS_1)))
 
 shift_den_bit\which:
-	.macro	sdb_one\which
+
+.macro	SDB_ONE\which
 	mov	a, den
 	mov1	cy,a.7
 	bc	$enter_main_loop\which
@@ -125,10 +125,10 @@
 
 	;; bit <<= 1
 	shl	bit, 1
-	.endm
+.endm
 
-	sdb_one\which
-	sdb_one\which
+	SDB_ONE\which
+	SDB_ONE\which
 
 	br	$shift_den_bit\which
 
@@ -164,42 +164,41 @@
 
 main_loop_done\which:	
 	ret
-	.endm
+	END_FUNC __generic_qidivmod\which
+.endm
 
-	make_generic _d 1
-	make_generic _m 0
+;----------------------------------------------------------------------
 
+	MAKE_GENERIC _d 1
+	MAKE_GENERIC _m 0
+
 ;----------------------------------------------------------------------
 
-	.global	___udivqi3
-	.type	___udivqi3,@function
-___udivqi3:
+START_FUNC ___udivqi3
 	;; r8 = 4[sp] / 6[sp]
 	movw	hl, sp
-	br	$!generic_div
-	.size	___udivqi3, . - ___udivqi3
+	br	$!__generic_qidiv
+END_FUNC ___udivqi3
 	
 
-	.global	___umodqi3
-	.type	___umodqi3,@function
-___umodqi3:
+START_FUNC ___umodqi3
 	;; r8 = 4[sp] % 6[sp]
 	movw	hl, sp
-	br	$!generic_mod
-	.size	___umodqi3, . - ___umodqi3
+	br	$!__generic_qimod
+END_FUNC ___umodqi3
 
 ;----------------------------------------------------------------------
 
-	.macro neg_ax
+.macro NEG_AX
 	movw	hl, ax
 	mov	a, #0
 	sub	a, [hl]
 	mov	[hl], a
-	.endm
+.endm
 
-	.global	___divqi3
-	.type	___divqi3,@function
-___divqi3:
+;----------------------------------------------------------------------
+
+START_FUNC	___divqi3
 	;; r8 = 4[sp] / 6[sp]
 	movw	hl, sp
 	movw	de, #0
@@ -209,7 +208,7 @@
 	mov	a, [sp+6]
 	mov1	cy, a.7
 	bc	$div_signed_den
-	br	$!generic_div
+	br	$!__generic_qidiv
 	
 div_signed_num:
 	;; neg [sp+4]
@@ -227,7 +226,7 @@
 	mov	[hl+6], a
 	mov	e, #1
 div_unsigned_den:	
-	call	$!generic_div
+	call	$!__generic_qidiv
 
 	mov	a, d
 	cmp0	a
@@ -235,13 +234,13 @@
 	;;  We have to restore the numerator [sp+4]
 	movw	ax, sp
 	addw	ax, #4
-	neg_ax
+	NEG_AX
 	mov	a, d
 div_skip_restore_num:	
 	xor	a, e
 	bz	$div_no_neg
 	movw	ax, #r8
-	neg_ax
+	NEG_AX
 div_no_neg:
 	mov	a, e
 	cmp0	a
@@ -248,15 +247,13 @@
 	bz	$div_skip_restore_den
 	movw	ax, sp
 	addw	ax, #6
-	neg_ax
+	NEG_AX
 div_skip_restore_den:	
 	ret
-	.size	___divqi3, . - ___divqi3
+END_FUNC ___divqi3
 	
 
-	.global	___modqi3
-	.type	___modqi3,@function
-___modqi3:
+START_FUNC ___modqi3
 	;; r8 = 4[sp] % 6[sp]
 	movw	hl, sp
 	movw	de, #0
@@ -266,7 +263,7 @@
 	mov	a, [hl+6]
 	mov1	cy, a.7
 	bc	$mod_signed_den
-	br	$!generic_mod
+	br	$!__generic_qimod
 	
 mod_signed_num:
 	;; neg [sp+4]
@@ -284,7 +281,7 @@
 	mov	[hl+6], a
 	mov	e, #1
 mod_unsigned_den:	
-	call	$!generic_mod
+	call	$!__generic_qimod
 
 	mov	a, d
 	cmp0	a
@@ -295,7 +292,7 @@
 	;;  Also restore numerator
 	movw 	ax, sp
 	addw	ax, #4
-	neg_ax
+	NEG_AX
 mod_no_neg:
 	mov	a, e
 	cmp0	a
@@ -302,9 +299,7 @@
 	bz	$mod_skip_restore_den
 	movw	ax, sp
 	addw	ax, #6
-	neg_ax
+	NEG_AX
 mod_skip_restore_den:	
 	ret
-	.size	___modqi3, . - ___modqi3
-
-#endif
+END_FUNC ___modqi3
Index: libgcc/config/rl78/divmodsi.S
===================================================================
--- libgcc/config/rl78/divmodsi.S	(revision 217462)
+++ libgcc/config/rl78/divmodsi.S	(working copy)
@@ -23,11 +23,9 @@
    see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
    <http://www.gnu.org/licenses/>.  */
 
-#ifndef __RL78_G10__
-
 #include "vregs.h"
 
-	.macro make_generic  which,need_result
+.macro MAKE_GENERIC  which,need_result
 
 	.if \need_result
 	quot = r8
@@ -69,6 +67,8 @@
 	bitB2 = bit+2
 	bitB3 = bit+3
 
+START_FUNC __generic_sidivmod\which
+
 num_lt_den\which:
 	.if \need_result
 	movw	r8, #0
@@ -100,12 +100,13 @@
 	;; to store sign information that must remain intact
 
 	.if \need_result
+	.global __generic_sidiv
+__generic_sidiv:
 
-generic_div:
-
 	.else
 
-generic_mod:
+	.global __generic_simod
+__generic_simod:
 
 	.endif
 
@@ -119,6 +120,14 @@
 	cmpw	ax, [hl+8] ; numL
 	bh	$num_lt_den\which
 
+#ifdef __RL78_G10__
+	movw	ax, denL
+	push	ax
+	movw	ax, bitL
+	push	ax
+	movw	ax, bitH
+	push	ax
+#else
 	sel	rb2
 	push	ax		; denL
 ;	push	bc		; denH
@@ -125,6 +134,7 @@
 	push	de		; bitL
 	push	hl		; bitH - stored in BC
 	sel	rb0
+#endif
 
 	;; (quot,rem) = 16[sp] /% 20[sp]
 
@@ -360,6 +370,14 @@
 	bnz	$main_loop_himode\which
 
 main_loop_done_himode\which:	
+#ifdef __RL78_G10__
+	pop	ax
+	movw	bitH, ax
+	pop	ax
+	movw	bitL, ax
+	pop	ax
+	movw	denL, ax
+#else
 	sel	rb2
 	pop	hl		; bitH - stored in BC
 	pop	de		; bitL
@@ -366,35 +384,35 @@
 ;	pop	bc		; denH
 	pop	ax		; denL
 	sel	rb0
+#endif
 
 	ret
-	.endm
+END_FUNC __generic_sidivmod\which
+.endm
 
-	make_generic _d 1
-	make_generic _m 0
+;----------------------------------------------------------------------
 
+	MAKE_GENERIC _d 1
+	MAKE_GENERIC _m 0
+
 ;----------------------------------------------------------------------
 
-	.global	___udivsi3
-	.type	___udivsi3,@function
-___udivsi3:
+START_FUNC ___udivsi3
 	;; r8 = 4[sp] / 8[sp]
-	call	$!generic_div
+	call	$!__generic_sidiv
 	ret
-	.size	___udivsi3, . - ___udivsi3
+END_FUNC ___udivsi3
 	
 
-	.global	___umodsi3
-	.type	___umodsi3,@function
-___umodsi3:
+START_FUNC ___umodsi3
 	;; r8 = 4[sp] % 8[sp]
-	call	$!generic_mod
+	call	$!__generic_simod
 	ret
-	.size	___umodsi3, . - ___umodsi3
+END_FUNC ___umodsi3
 
 ;----------------------------------------------------------------------
 
-	.macro neg_ax
+.macro NEG_AX
 	movw	hl, ax
 	movw	ax, #0
 	subw	ax, [hl]
@@ -404,11 +422,11 @@
 	decw	ax
 	subw	ax, [hl+2]
 	movw	[hl+2], ax
-	.endm
+.endm
 
-	.global	___divsi3
-	.type	___divsi3,@function
-___divsi3:
+;----------------------------------------------------------------------
+
+START_FUNC ___divsi3
 	;; r8 = 4[sp] / 8[sp]
 	movw	de, #0
 	mov	a, [sp+7]
@@ -417,14 +435,14 @@
 	mov	a, [sp+11]
 	mov1	cy, a.7
 	bc	$div_signed_den
-	call	$!generic_div
+	call	$!__generic_sidiv
 	ret
-	
+
 div_signed_num:
 	;; neg [sp+4]
 	movw	ax, sp
 	addw	ax, #4
-	neg_ax
+	NEG_AX
 	mov	d, #1
 	mov	a, [sp+11]
 	mov1	cy, a.7
@@ -433,10 +451,10 @@
 	;; neg [sp+8]
 	movw	ax, sp
 	addw	ax, #8
-	neg_ax
+	NEG_AX
 	mov	e, #1
 div_unsigned_den:	
-	call	$!generic_div
+	call	$!__generic_sidiv
 
 	mov	a, d
 	cmp0	a
@@ -444,13 +462,13 @@
 	;;  We have to restore the numerator [sp+4]
 	movw	ax, sp
 	addw	ax, #4
-	neg_ax
+	NEG_AX
 	mov	a, d
 div_skip_restore_num:	
 	xor	a, e
 	bz	$div_no_neg
 	movw	ax, #r8
-	neg_ax
+	NEG_AX
 div_no_neg:
 	mov	a, e
 	cmp0	a
@@ -458,15 +476,13 @@
 	;;  We have to restore the denominator [sp+8]
 	movw	ax, sp
 	addw	ax, #8
-	neg_ax
+	NEG_AX
 div_skip_restore_den:
 	ret
-	.size	___divsi3, . - ___divsi3
+END_FUNC ___divsi3
 	
 
-	.global	___modsi3
-	.type	___modsi3,@function
-___modsi3:
+START_FUNC ___modsi3
 	;; r8 = 4[sp] % 8[sp]
 	movw	de, #0
 	mov	a, [sp+7]
@@ -475,14 +491,14 @@
 	mov	a, [sp+11]
 	mov1	cy, a.7
 	bc	$mod_signed_den
-	call	$!generic_mod
+	call	$!__generic_simod
 	ret
-	
+
 mod_signed_num:
 	;; neg [sp+4]
 	movw	ax, sp
 	addw	ax, #4
-	neg_ax
+	NEG_AX
 	mov	d, #1
 	mov	a, [sp+11]
 	mov1	cy, a.7
@@ -491,20 +507,20 @@
 	;; neg [sp+8]
 	movw	ax, sp
 	addw	ax, #8
-	neg_ax
+	NEG_AX
 	mov	e, #1
 mod_unsigned_den:	
-	call	$!generic_mod
+	call	$!__generic_simod
 
 	mov	a, d
 	cmp0	a
 	bz	$mod_no_neg
 	movw	ax, #r8
-	neg_ax
+	NEG_AX
 	;;  We have to restore [sp+4] as well.
 	movw	ax, sp
 	addw	ax, #4
-	neg_ax
+	NEG_AX
 mod_no_neg:
  .if 1
 	mov	a, e
@@ -512,10 +528,8 @@
 	bz	$mod_skip_restore_den
 	movw	ax, sp
 	addw	ax, #8
-	neg_ax
+	NEG_AX
 mod_skip_restore_den:	
  .endif	
 	ret
-	.size	___modsi3, . - ___modsi3
-
-#endif
+END_FUNC ___modsi3
Index: libgcc/config/rl78/lib2div.c
===================================================================
--- libgcc/config/rl78/lib2div.c	(revision 217462)
+++ libgcc/config/rl78/lib2div.c	(working copy)
@@ -34,41 +34,6 @@
 #define C3B(a,b,c) a##b##c
 #define C3(a,b,c) C3B(a,b,c)
 
-#ifdef __RL78_G10__
-
-#define UINT_TYPE	uint32_type
-#define SINT_TYPE	sint32_type
-#define BITS_MINUS_1	31
-#define NAME_MODE	si
-
-#include "rl78-divmod.h"
-
-#undef UINT_TYPE
-#undef SINT_TYPE
-#undef BITS_MINUS_1
-#undef NAME_MODE
-
-#define UINT_TYPE	uint16_type
-#define SINT_TYPE	sint16_type
-#define BITS_MINUS_1	15
-#define NAME_MODE	hi
-
-#include "rl78-divmod.h"
-
-#undef UINT_TYPE
-#undef SINT_TYPE
-#undef BITS_MINUS_1
-#undef NAME_MODE
-
-#define UINT_TYPE	uint08_type
-#define SINT_TYPE	sint08_type
-#define BITS_MINUS_1	7
-#define NAME_MODE	qi
-
-#include "rl78-divmod.h"
-
-#endif
-
 /* See the comment by the definition of LIBGCC2_UNITS_PER_WORD in
    m32c.h for why we are creating extra versions of some of the
    functions defined in libgcc2.c.  */
Index: libgcc/config/rl78/lib2mul.c
===================================================================
--- libgcc/config/rl78/lib2mul.c	(revision 217462)
+++ libgcc/config/rl78/lib2mul.c	(working copy)
@@ -30,30 +30,10 @@
 #define C3B(a,b,c) a##b##c
 #define C3(a,b,c) C3B(a,b,c)
 
-#ifdef __RL78_G10__
-
-#define UINT_TYPE	uint32_type
-#define BITS_MINUS_1	31
-#define NAME_MODE	si
-
-#include "rl78-mul.h"
-
 #undef UINT_TYPE
 #undef BITS_MINUS_1
 #undef NAME_MODE
 
-#define UINT_TYPE	uint16_type
-#define BITS_MINUS_1	15
-#define NAME_MODE	hi
-
-#include "rl78-mul.h"
-
-#endif
-
-#undef UINT_TYPE
-#undef BITS_MINUS_1
-#undef NAME_MODE
-
 #define UINT_TYPE	uint08_type
 #define BITS_MINUS_1	7
 #define NAME_MODE	qi
Index: libgcc/config/rl78/mulsi3.S
===================================================================
--- libgcc/config/rl78/mulsi3.S	(revision 217462)
+++ libgcc/config/rl78/mulsi3.S	(working copy)
@@ -24,12 +24,6 @@
 
 #include "vregs.h"
  
-; the G10 only has one register bank, so cannot use these optimized
-; versions.  Use the C version instead.
-
-#ifndef __RL78_G10__
-
-
 ;----------------------------------------------------------------------
 
 ; Register use:
@@ -39,19 +33,22 @@
 ; DE	count	(resL-tmp)
 ; HL	[sp+4]
 
-	.text
-	nop
-	.global	___mulsi3		; (USI a, USI b)
-___mulsi3:
+START_FUNC ___mulsi3
 	;; A is at [sp+4]
 	;; B is at [sp+8]
 	;; result is in R8..R11
 
+#ifdef __RL78_G10__
+	movw	ax, r16
+	push	ax
+	movw	ax, r18
+	push	ax
+#else
 	sel	rb2
 	push	ax
 	push	bc
 	sel	rb0
-
+#endif
 	clrw	ax
 	movw	r8, ax
 	movw	r16, ax
@@ -62,9 +59,19 @@
 	cmpw	ax, #0xffff
 	bnz	$2f
 	movw	ax, [sp+8]
+#ifdef __RL78_G10__
+	push    bc
+	movw    bc, r8
+	xchw	ax, bc
+	subw    ax, bc
+	movw	r8, ax
+	movw    ax, bc
+	pop     bc
+#else
 	sel	rb1
 	subw	ax, r_0
 	sel	rb0
+#endif
 	br	$1f
 2:	
 	movw	bc, ax
@@ -80,9 +87,19 @@
 	cmpw	ax, #0xffff
 	bnz	$2f
 	movw	ax, [sp+12]
+#ifdef __RL78_G10__
+	push    bc
+	movw    bc, r8
+	xchw	ax, bc
+	subw    ax, bc
+	movw	r8, ax
+	movw    ax, bc
+	pop     bc
+#else
 	sel	rb1
 	subw	ax, r_0
 	sel	rb0
+#endif
 	br	$1f
 2:	
 	movw	bc, ax
@@ -113,12 +130,28 @@
 	movw	r10, ax
 	movw	ax, bc
 
-
 .Lmul_hisi_top:
 	movw	bc, #0
 
 .Lmul_hisi_loop:
 	shrw	ax, 1
+#ifdef __RL78_G10__
+	push	ax
+	bnc	$.Lmul_hisi_no_add_g10
+	movw	ax, r8
+	addw	ax, r10
+	movw	r8, ax
+	sknc
+	incw	r16
+	movw	ax, r16
+	addw	ax, r_2
+	movw	r16, ax
+.Lmul_hisi_no_add_g10:
+	movw	ax, r10
+	shlw	ax, 1
+	movw	r10, ax
+	pop	ax
+#else
 	bnc	$.Lmul_hisi_no_add
 	sel	rb1
 	addw	ax, bc
@@ -130,11 +163,29 @@
 	sel	rb1
 	shlw	bc, 1
 	sel	rb0
+#endif
 	rolwc	bc, 1
 	cmpw	ax, #0
 	bz	$.Lmul_hisi_done
 
 	shrw	ax, 1
+#ifdef __RL78_G10__
+	push	ax
+	bnc	$.Lmul_hisi_no_add2_g10
+	movw	ax, r8
+	addw	ax, r10
+	movw	r8, ax
+	movw	ax, r16
+	sknc
+	incw	ax
+	addw	ax, r_2
+	movw	r16, ax
+.Lmul_hisi_no_add2_g10:
+	movw	ax, r10
+	shlw	ax, 1
+	movw	r10, ax
+	pop	ax
+#else
 	bnc	$.Lmul_hisi_no_add2
 	sel	rb1
 	addw	ax, bc
@@ -146,6 +197,7 @@
 	sel	rb1
 	shlw	bc, 1
 	sel	rb0
+#endif
 	rolwc	bc, 1
 	cmpw	ax, #0
 	bnz	$.Lmul_hisi_loop
@@ -154,18 +206,25 @@
 
 	movw	ax, r16
 	movw	r10, ax
-	
+
+#ifdef __RL78_G10__
+	pop	ax
+	movw	r18, ax
+	pop	ax
+	movw	r16, ax
+#else
 	sel	rb2
 	pop	bc
 	pop	ax
 	sel	rb0
+#endif
 
 	ret
+END_FUNC ___mulsi3
 
 ;----------------------------------------------------------------------
 
-	.global	___mulhi3
-___mulhi3:
+START_FUNC ___mulhi3
 	movw	r8, #0
 	movw	ax, [sp+6]
 	movw	bc, ax
@@ -179,9 +238,18 @@
 	br	$.Lmul_hi_loop
 	
 .Lmul_hi_top:
+#ifdef __RL78_G10__
+	push	ax
+	movw	ax, r8
+	addw	ax, r_2
+	movw	r8, ax
+	pop	ax
+#else
 	sel	rb1
 	addw	ax, r_2
 	sel	rb0
+#endif
+
 .Lmul_hi_no_add:	
 	shlw	bc, 1
 .Lmul_hi_loop:
@@ -198,5 +266,4 @@
 
 .Lmul_hi_done:
 	ret
-
-#endif
+END_FUNC ___mulhi3
Index: libgcc/config/rl78/t-rl78
===================================================================
--- libgcc/config/rl78/t-rl78	(revision 217462)
+++ libgcc/config/rl78/t-rl78	(working copy)
@@ -30,3 +30,5 @@
 	$(srcdir)/config/rl78/divmodqi.S \
 	$(srcdir)/config/rl78/signbit.S \
 	$(srcdir)/config/rl78/cmpsi2.S
+
+HOST_LIBGCC2_CFLAGS += -Os -ffunction-sections -fdata-sections
Index: libgcc/config/rl78/vregs.h
===================================================================
--- libgcc/config/rl78/vregs.h	(revision 217462)
+++ libgcc/config/rl78/vregs.h	(working copy)
@@ -54,3 +54,18 @@
 r23	=	0xffeef
 
 #endif
+
+    /* Start a function in its own section, so that it
+       can be subject to linker garbage collection.  */
+.macro START_FUNC name
+	.pushsection .text.\name,"ax",@progbits
+	.global \name
+	.type \name , @function
+\name:
+.endm
+
+    /* End the function.  Set the size.  */
+.macro END_FUNC name	
+	.size \name , . - \name
+	.popsection
+.endm