Here is patch #4, which changes the MOVDF/MOVDD insns to call C code for their
output. I added documentation to the various MOVD{F,D} patterns, similar to the
documentation I've done on the other patterns, to make it simpler to track which
two constraints match which instruction and which instruction type is used.
The next patch may tackle an instruction discrepancy that I've noticed in
building Spec 2006. The tonto benchmark generates slightly different code with
these changes than without them. It doesn't affect the runtime of the benchmark,
but infrastructure changes such as these should generate the same code.
After that, I will tackle the 32-bit moves and then the 8/16-bit moves.
2018-03-16 Michael Meissner <[email protected]>
* config/rs6000/rs6000-output.c (rs6000_output_move_64bit): Deal
with SPR<-SPR where the register is the same.
* config/rs6000/rs6000.md (mov<mode>_hardfloat32): Add comments
and spacing to allow easier understanding of which constraints are
used for which alternative. Use rs6000_valid_move_p to validate
the move. Use rs6000_output_move_64bit to print out the correct
instruction.
(mov<mode>_softfloat32): Likewise.
(mov<mode>_hardfloat64): Likewise.
(mov<mode>_softfloat64): Likewise.
--
Michael Meissner, IBM
IBM, M/S 2506R, 550 King Street, Littleton, MA 01460-6245, USA
email: [email protected], phone: +1 (978) 899-4797
Index: gcc/config/rs6000/rs6000-output.c
===================================================================
--- gcc/config/rs6000/rs6000-output.c (revision 258576)
+++ gcc/config/rs6000/rs6000-output.c (working copy)
@@ -162,7 +162,13 @@ rs6000_output_move_64bit (rtx operands[]
/* Moves to SPRs. */
else if (reg_is_spr_p (dest))
- return "mt%0 %1";
+ {
+ if (src_gpr_p)
+ return "mt%0 %1";
+
+ else if (dest_regno == src_regno)
+ return "nop";
+ }
}
/* Loads. */
Index: gcc/config/rs6000/rs6000.md
===================================================================
--- gcc/config/rs6000/rs6000.md (revision 258576)
+++ gcc/config/rs6000/rs6000.md (working copy)
@@ -7398,92 +7398,108 @@ (define_split
;; If we have FPR registers, rs6000_emit_move has moved all constants to
memory,
;; except for 0.0 which can be created on VSX with an xor instruction.
+;; STFD LFD FMR LXSD STXSD
+;; LXSD STXSD XXLOR XXLXOR GPR<-0
+;; LWZ STW MR
+
(define_insn "*mov<mode>_hardfloat32"
- [(set (match_operand:FMOVE64 0 "nonimmediate_operand"
"=m,d,d,<f64_p9>,wY,<f64_av>,Z,<f64_vsx>,<f64_vsx>,!r,Y,r,!r")
- (match_operand:FMOVE64 1 "input_operand"
"d,m,d,wY,<f64_p9>,Z,<f64_av>,<f64_vsx>,<zero_fp>,<zero_fp>,r,Y,r"))]
+ [(set (match_operand:FMOVE64 0 "nonimmediate_operand"
+ "=m, d, d, <f64_p9>, wY,
+ <f64_av>, Z, <f64_vsx>, <f64_vsx>, !r,
+ Y, r, !r")
+
+ (match_operand:FMOVE64 1 "input_operand"
+ "d, m, d, wY, <f64_p9>,
+ Z, <f64_av>, <f64_vsx>, <zero_fp>, <zero_fp>,
+ r, Y, r"))]
+
"! TARGET_POWERPC64 && TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
- && (gpc_reg_operand (operands[0], <MODE>mode)
- || gpc_reg_operand (operands[1], <MODE>mode))"
- "@
- stfd%U0%X0 %1,%0
- lfd%U1%X1 %0,%1
- fmr %0,%1
- lxsd %0,%1
- stxsd %1,%0
- lxsd%U1x %x0,%y1
- stxsd%U0x %x1,%y0
- xxlor %x0,%x1,%x1
- xxlxor %x0,%x0,%x0
- #
- #
- #
- #"
- [(set_attr "type"
"fpstore,fpload,fpsimple,fpload,fpstore,fpload,fpstore,veclogical,veclogical,two,store,load,two")
+ && rs6000_valid_move_p (operands[0], operands[1])"
+ "* return rs6000_output_move_64bit (operands);"
+ [(set_attr "type"
+ "fpstore, fpload, fpsimple, fpload, fpstore,
+ fpload, fpstore, veclogical, veclogical, two,
+ store, load, two")
+
(set_attr "size" "64")
- (set_attr "length" "4,4,4,4,4,4,4,4,4,8,8,8,8")])
+ (set_attr "length"
+ "4, 4, 4, 4, 4,
+ 4, 4, 4, 4, 8,
+ 8, 8, 8")])
+
+;; STW LWZ MR G-const H-const F-const
(define_insn "*mov<mode>_softfloat32"
- [(set (match_operand:FMOVE64 0 "nonimmediate_operand" "=Y,r,r,r,r,r")
- (match_operand:FMOVE64 1 "input_operand" "r,Y,r,G,H,F"))]
- "! TARGET_POWERPC64
- && (TARGET_SINGLE_FLOAT || TARGET_SOFT_FLOAT)
- && (gpc_reg_operand (operands[0], <MODE>mode)
- || gpc_reg_operand (operands[1], <MODE>mode))"
- "#"
- [(set_attr "type" "store,load,two,*,*,*")
- (set_attr "length" "8,8,8,8,12,16")])
+ [(set (match_operand:FMOVE64 0 "nonimmediate_operand"
+ "=Y, r, r, r, r, r")
+
+ (match_operand:FMOVE64 1 "input_operand"
+ "r, Y, r, G, H, F"))]
+
+ "! TARGET_POWERPC64 && (TARGET_SINGLE_FLOAT || TARGET_SOFT_FLOAT)
+ && rs6000_valid_move_p (operands[0], operands[1])"
+ "* return rs6000_output_move_64bit (operands);"
+ [(set_attr "type"
+ "store, load, two, *, *, *")
+
+ (set_attr "length"
+ "8, 8, 8, 8, 12, 16")])
; ld/std require word-aligned displacements -> 'Y' constraint.
; List Y->r and r->Y before r->r for reload.
+
+;; STFD LFD FMR LXSD STXSD
+;; LXSDX STXSDX XXLOR XXLXOR LI 0
+;; STD LD MR MT<SPR> MF<SPR>
+;; NOP MFTGPR MFFGPR MTVSRD MFVSRD
+
(define_insn "*mov<mode>_hardfloat64"
- [(set (match_operand:FMOVE64 0 "nonimmediate_operand"
"=m,d,d,<f64_p9>,wY,<f64_av>,Z,<f64_vsx>,<f64_vsx>,!r,Y,r,!r,*c*l,!r,*h,r,wg,r,<f64_dm>")
- (match_operand:FMOVE64 1 "input_operand"
"d,m,d,wY,<f64_p9>,Z,<f64_av>,<f64_vsx>,<zero_fp>,<zero_fp>,r,Y,r,r,h,0,wg,r,<f64_dm>,r"))]
+ [(set (match_operand:FMOVE64 0 "nonimmediate_operand"
+ "=m, d, d, <f64_p9>, wY,
+ <f64_av>, Z, <f64_vsx>, <f64_vsx>, !r,
+ Y, r, !r, *c*l, !r,
+ *h, r, wg, r, <f64_dm>")
+
+ (match_operand:FMOVE64 1 "input_operand"
+ "d, m, d, wY, <f64_p9>,
+ Z, <f64_av>, <f64_vsx>, <zero_fp>, <zero_fp>,
+ r, Y, r, r, h,
+ 0, wg, r, <f64_dm>, r"))]
+
"TARGET_POWERPC64 && TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
- && (gpc_reg_operand (operands[0], <MODE>mode)
- || gpc_reg_operand (operands[1], <MODE>mode))"
- "@
- stfd%U0%X0 %1,%0
- lfd%U1%X1 %0,%1
- fmr %0,%1
- lxsd %0,%1
- stxsd %1,%0
- lxsd%U1x %x0,%y1
- stxsd%U0x %x1,%y0
- xxlor %x0,%x1,%x1
- xxlxor %x0,%x0,%x0
- li %0,0
- std%U0%X0 %1,%0
- ld%U1%X1 %0,%1
- mr %0,%1
- mt%0 %1
- mf%1 %0
- nop
- mftgpr %0,%1
- mffgpr %0,%1
- mfvsrd %0,%x1
- mtvsrd %x0,%1"
- [(set_attr "type"
"fpstore,fpload,fpsimple,fpload,fpstore,fpload,fpstore,veclogical,veclogical,integer,store,load,*,mtjmpr,mfjmpr,*,mftgpr,mffgpr,mftgpr,mffgpr")
+ && rs6000_valid_move_p (operands[0], operands[1])"
+ "* return rs6000_output_move_64bit (operands);"
+ [(set_attr "type"
+ "fpstore, fpload, fpsimple, fpload, fpstore,
+ fpload, fpstore, veclogical, veclogical, integer,
+ store, load, *, mtjmpr, mfjmpr,
+ *, mftgpr, mffgpr, mftgpr, mffgpr")
+
(set_attr "size" "64")
(set_attr "length" "4")])
+;; STD LD MR MT<SPR> MF<SPR> G-const
+;; H-const F-const Special
+
(define_insn "*mov<mode>_softfloat64"
- [(set (match_operand:FMOVE64 0 "nonimmediate_operand" "=Y,r,r,cl,r,r,r,r,*h")
- (match_operand:FMOVE64 1 "input_operand" "r,Y,r,r,h,G,H,F,0"))]
+ [(set (match_operand:FMOVE64 0 "nonimmediate_operand"
+ "=Y, r, r, cl, r, r,
+ r, r, *h")
+
+ (match_operand:FMOVE64 1 "input_operand"
+ "r, Y, r, r, h, G,
+ H, F, 0"))]
+
"TARGET_POWERPC64 && TARGET_SOFT_FLOAT
- && (gpc_reg_operand (operands[0], <MODE>mode)
- || gpc_reg_operand (operands[1], <MODE>mode))"
- "@
- std%U0%X0 %1,%0
- ld%U1%X1 %0,%1
- mr %0,%1
- mt%0 %1
- mf%1 %0
- #
- #
- #
- nop"
- [(set_attr "type" "store,load,*,mtjmpr,mfjmpr,*,*,*,*")
- (set_attr "length" "4,4,4,4,4,8,12,16,4")])
+ && rs6000_valid_move_p (operands[0], operands[1])"
+ "* return rs6000_output_move_64bit (operands);"
+ [(set_attr "type"
+ "store, load, *, mtjmpr, mfjmpr, *,
+ *, *, *")
+
+ (set_attr "length"
+ "4, 4, 4, 4, 4, 8,
+ 12, 16, 4")])
(define_expand "mov<mode>"
[(set (match_operand:FMOVE128 0 "general_operand")