https://gcc.gnu.org/g:f742d074a2e258b4bf938c89337d6c6bc58df3a5
commit f742d074a2e258b4bf938c89337d6c6bc58df3a5
Author: Michael Meissner <meiss...@linux.ibm.com>
Date:   Fri May 24 00:22:22 2024 -0400

    Remove insn alternatives for SPRs with non-integer modes

    The previous patch changed the modes that SPR registers can hold to just be
    appropriately sized integers (VRSAVE and VSCR can only hold SImode, while
    CTR and LR can only hold pointer-sized values).  This patch removes the
    alternatives that move values to and from SPR registers from all of the
    move insns for CC modes and floating point types.

    2024-05-23  Michael Meissner  <meiss...@linux.ibm.com>

        * config/rs6000/rs6000.md (mov<mode>_internal): Remove alternatives
        moving values to and from SPR registers.
        (movcc_<mode>): Likewise.
        (movsf_hardfloat): Likewise.
        (movsd_hardfloat): Likewise.
        (mov<mode>_softfloat): Likewise.
        (mov<mode>_hardfloat64): Likewise.
        (mov<mode>_softfloat64): Likewise.

Diff:
---
 gcc/config/rs6000/rs6000.md | 119 ++++++++++++++++++--------------------------
 1 file changed, 49 insertions(+), 70 deletions(-)

diff --git a/gcc/config/rs6000/rs6000.md b/gcc/config/rs6000/rs6000.md
index 9503871ffd8..e2d679e0b61 100644
--- a/gcc/config/rs6000/rs6000.md
+++ b/gcc/config/rs6000/rs6000.md
@@ -8063,16 +8063,16 @@
 ;;    MR          LHZ/LBZ    LXSI*ZX    STH/STB    STXSI*X    LI
 ;;    XXLOR       load 0     load -1    VSPLTI*    #          MFVSRWZ
-;;    MTVSRWZ     MF%1       MT%1       NOP
+;;    MTVSRWZ
 (define_insn "*mov<mode>_internal"
   [(set (match_operand:QHI 0 "nonimmediate_operand"
           "=r, r, wa, m, ?Z, r,
            wa, wa, wa, v, ?v, r,
-           wa, r, *c*l, *h")
+           wa")
        (match_operand:QHI 1 "input_operand"
           "r, m, ?Z, r, wa, i,
            wa, O, wM, wB, wS, wa,
-           r, *h, r, 0"))]
+           r"))]
   "gpc_reg_operand (operands[0], <MODE>mode)
   || gpc_reg_operand (operands[1], <MODE>mode)"
   "@
@@ -8088,22 +8088,19 @@
    vspltis<wd> %0,%1
    #
    mfvsrwz %0,%x1
-   mtvsrwz %x0,%1
-   mf%1 %0
-   mt%0 %1
-   nop"
+   mtvsrwz %x0,%1"
  [(set_attr "type"
           "*, load, fpload, store, fpstore, *,
            vecsimple, vecperm, vecperm, vecperm, vecperm, mfvsr,
-           mtvsr, mfjmpr, mtjmpr, *")
+           mtvsr")
   (set_attr "length"
           "*, *, *, *, *, *,
            *, *, *, *, 8, *,
-           *, *, *, *")
+           *")
   (set_attr "isa"
           "*, *, p9v, *, p9v, *,
            p9v, p9v, p9v, p9v, p9v, p9v,
-           p9v, *, *, *")])
+           p9v")])

 ;; Here is how to move condition codes around.  When we store CC data in
@@ -8119,9 +8116,9 @@
 (define_insn "*movcc_<mode>"
   [(set (match_operand:CC_any 0 "nonimmediate_operand"
-           "=y,x,?y,y,r,r,r,r, r,*c*l,r,m")
+           "=y,x,?y,y,r,r,r,r,r,m")
        (match_operand:CC_any 1 "general_operand"
-           " y,r, r,O,x,y,r,I,*h, r,m,r"))]
+           " y,r, r,O,x,y,r,I,m,r"))]
   "register_operand (operands[0], <MODE>mode)
    || register_operand (operands[1], <MODE>mode)"
   "@
@@ -8133,8 +8130,6 @@
    mfcr %0%Q1\;rlwinm %0,%0,%f1,0xf0000000
    mr %0,%1
    li %0,%1
-   mf%1 %0
-   mt%0 %1
    lwz%U1%X1 %0,%1
    stw%U0%X0 %1,%0"
  [(set_attr_alternative "type"
@@ -8148,11 +8143,9 @@
            (const_string "mfcrf")
            (const_string "mfcr"))
       (const_string "integer")
       (const_string "integer")
-      (const_string "mfjmpr")
-      (const_string "mtjmpr")
       (const_string "load")
       (const_string "store")])
-   (set_attr "length" "*,*,12,*,*,8,*,*,*,*,*,*")])
+   (set_attr "length" "*,*,12,*,*,8,*,*,*,*")])
 ;; For floating-point, we normally deal with the floating-point registers
 ;; unless -msoft-float is used.  The sole exception is that parameter passing
@@ -8203,17 +8196,17 @@
 ;;
 ;;    LWZ         LFS        LXSSP      LXSSPX     STFS       STXSSP
 ;;    STXSSPX     STW        XXLXOR     LI         FMR        XSCPSGNDP
-;;    MR          MT<x>      MF<x>      NOP        XXSPLTIDP
+;;    MR          XXSPLTIDP
 (define_insn "movsf_hardfloat"
   [(set (match_operand:SF 0 "nonimmediate_operand"
           "=!r, f, v, wa, m, wY,
            Z, m, wa, !r, f, wa,
-           !r, *c*l, !r, *h, wa")
+           !r, wa")
        (match_operand:SF 1 "input_operand"
           "m, m, wY, Z, f, v,
            wa, r, j, j, f, wa,
-           r, r, *h, 0, eP"))]
+           r, eP"))]
   "(register_operand (operands[0], SFmode)
    || register_operand (operands[1], SFmode))
   && TARGET_HARD_FLOAT
@@ -8233,32 +8226,29 @@
    fmr %0,%1
    xscpsgndp %x0,%x1,%x1
    mr %0,%1
-   mt%0 %1
-   mf%1 %0
-   nop
    #"
  [(set_attr "type"
           "load, fpload, fpload, fpload, fpstore, fpstore,
            fpstore, store, veclogical, integer, fpsimple, fpsimple,
-           *, mtjmpr, mfjmpr, *, vecperm")
+           *, vecperm")
   (set_attr "isa"
           "*, *, p9v, p8v, *, p9v,
            p8v, *, *, *, *, *,
-           *, *, *, *, p10")
+           *, p10")
   (set_attr "prefixed"
           "*, *, *, *, *, *,
            *, *, *, *, *, *,
-           *, *, *, *, yes")])
+           *, yes")])

 ;;    LWZ         LFIWZX     STW        STFIWX     MTVSRWZ    MFVSRWZ
-;;    FMR         MR         MT%0       MF%1       NOP
+;;    FMR         MR
 (define_insn "movsd_hardfloat"
   [(set (match_operand:SD 0 "nonimmediate_operand"
           "=!r, d, m, ?Z, ?d, ?r,
-           f, !r, *c*l, !r, *h")
+           f, !r")
        (match_operand:SD 1 "input_operand"
           "m, ?Z, r, wx, r, d,
-           f, r, r, *h, 0"))]
+           f, r"))]
   "(register_operand (operands[0], SDmode)
    || register_operand (operands[1], SDmode))
   && TARGET_HARD_FLOAT"
@@ -8270,49 +8260,43 @@
    mtvsrwz %x0,%1
    mfvsrwz %0,%x1
    fmr %0,%1
-   mr %0,%1
-   mt%0 %1
-   mf%1 %0
-   nop"
+   mr %0,%1"
  [(set_attr "type"
           "load, fpload, store, fpstore, mtvsr, mfvsr,
-           fpsimple, *, mtjmpr, mfjmpr, *")
+           fpsimple, *")
   (set_attr "isa"
           "*, p7, *, *, p8v, p8v,
-           *, *, *, *, *")])
+           *, *")])

-;;    MR          MT%0       MF%0       LWZ        STW        LI
-;;    LIS         G-const.   F/n-const  NOP
+;;    MR          LWZ        STW        LI
+;;    LIS         G-const.   F/n-const
 (define_insn "*mov<mode>_softfloat"
   [(set (match_operand:FMOVE32 0 "nonimmediate_operand"
-          "=r, *c*l, r, r, m, r,
-           r, r, r, *h")
+          "=r, r, m, r,
+           r, r, r")
        (match_operand:FMOVE32 1 "input_operand"
-          "r, r, *h, m, r, I,
-           L, G, Fn, 0"))]
+          "r, m, r, I,
+           L, G, Fn"))]
   "(gpc_reg_operand (operands[0], <MODE>mode)
    || gpc_reg_operand (operands[1], <MODE>mode))
   && TARGET_SOFT_FLOAT"
   "@
   mr %0,%1
-  mt%0 %1
-  mf%1 %0
  lwz%U1%X1 %0,%1
  stw%U0%X0 %1,%0
  li %0,%1
  lis %0,%v1
  #
-  #
-  nop"
+  #"
  [(set_attr "type"
-          "*, mtjmpr, mfjmpr, load, store, *,
-           *, *, *, *")
+          "*, load, store, *,
+           *, *, *")
   (set_attr "length"
-          "*, *, *, *, *, *,
-           *, *, 8, *")])
+          "*, *, *, *,
+           *, *, 8")])

 ;; Like movsf, but adjust a SI value to be used in a SF context, i.e.
 ;; (set (reg:SF ...) (subreg:SF (reg:SI ...) 0))
@@ -8592,20 +8576,20 @@
 ;;    STFD        LFD        FMR        LXSD       STXSD
 ;;    LXSDX       STXSDX     XXLOR      XXLXOR     LI 0
-;;    STD         LD         MR         MT{CTR,LR} MF{CTR,LR}
-;;    NOP         MFVSRD     MTVSRD     XXSPLTIDP
+;;    STD         LD         MR
+;;    MFVSRD      MTVSRD     XXSPLTIDP
 (define_insn "*mov<mode>_hardfloat64"
   [(set (match_operand:FMOVE64 0 "nonimmediate_operand"
           "=m, d, d, <f64_p9>, wY,
            <f64_av>, Z, <f64_vsx>, <f64_vsx>, !r,
-           YZ, r, !r, *c*l, !r,
-           *h, r, <f64_dm>, wa")
+           YZ, r, !r,
+           r, <f64_dm>, wa")
        (match_operand:FMOVE64 1 "input_operand"
           "d, m, d, wY, <f64_p9>,
            Z, <f64_av>, <f64_vsx>, <zero_fp>, <zero_fp>,
-           r, YZ, r, r, *h,
-           0, <f64_dm>, r, eP"))]
+           r, YZ, r,
+           <f64_dm>, r, eP"))]
   "TARGET_POWERPC64 && TARGET_HARD_FLOAT
    && (gpc_reg_operand (operands[0], <MODE>mode)
        || gpc_reg_operand (operands[1], <MODE>mode))"
@@ -8623,39 +8607,36 @@
    std%U0%X0 %1,%0
    ld%U1%X1 %0,%1
    mr %0,%1
-   mt%0 %1
-   mf%1 %0
-   nop
    mfvsrd %0,%x1
    mtvsrd %x0,%1
    #"
  [(set_attr "type"
           "fpstore, fpload, fpsimple, fpload, fpstore,
            fpload, fpstore, veclogical, veclogical, integer,
-           store, load, *, mtjmpr, mfjmpr,
-           *, mfvsr, mtvsr, vecperm")
+           store, load, *,
+           mfvsr, mtvsr, vecperm")
   (set_attr "size" "64")
   (set_attr "isa"
           "*, *, *, p9v, p9v,
            p7v, p7v, *, *, *,
-           *, *, *, *, *,
-           *, p8v, p8v, p10")
+           *, *, *,
+           p8v, p8v, p10")
   (set_attr "prefixed"
           "*, *, *, *, *,
            *, *, *, *, *,
-           *, *, *, *, *,
-           *, *, *, *")])
+           *, *, *,
+           *, *, *")])

 ;;    STD         LD         MR         MT<SPR>    MF<SPR>    G-const
 ;;    H-const     F-const    Special
 (define_insn "*mov<mode>_softfloat64"
   [(set (match_operand:FMOVE64 0 "nonimmediate_operand"
-          "=Y, r, r, *c*l, r, r,
+          "=Y, r, r, r,
            r, r, *h")
        (match_operand:FMOVE64 1 "input_operand"
-          "r, Y, r, r, *h, G,
+          "r, Y, r, G,
            H, F, 0"))]
   "TARGET_POWERPC64 && TARGET_SOFT_FLOAT
@@ -8665,18 +8646,16 @@
    std%U0%X0 %1,%0
    ld%U1%X1 %0,%1
    mr %0,%1
-   mt%0 %1
-   mf%1 %0
    #
    #
    #
    nop"
  [(set_attr "type"
-          "store, load, *, mtjmpr, mfjmpr, *,
+          "store, load, *, *,
            *, *, *")
   (set_attr "length"
-          "*, *, *, *, *, 8,
+          "*, *, *, 8,
            12, 16, *")])

 ;; Split the VSX prefixed instruction to support SFmode and DFmode scalar
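For readers following the series, the rule that makes these alternatives dead is the SPR mode restriction from the previous patch: VRSAVE and VSCR accept only SImode-sized integers, while CTR and LR accept only pointer-sized integers, so SFmode/DFmode/SDmode and CC values can never end up in an SPR.  The following is a minimal, self-contained C sketch of that rule.  It is not the actual rs6000.cc hook implementation; the enum, struct, and helper names are illustrative only.

/* Standalone model of the SPR mode restriction described above.
   Illustrative only; not the rs6000.cc TARGET_HARD_REGNO_MODE_OK code.  */
#include <stdio.h>
#include <stdbool.h>

enum spr { SPR_VRSAVE, SPR_VSCR, SPR_CTR, SPR_LR };
enum mode_class { MC_INT, MC_FLOAT, MC_CC };

struct mode { const char *name; enum mode_class mclass; unsigned size; };

/* Assume a 64-bit target, so pointer-sized means 8 bytes.  */
static const unsigned pointer_size = 8;

/* VRSAVE/VSCR hold only 4-byte integers; CTR/LR hold only pointer-sized
   integers.  Floating-point and CC values are never allowed.  */
static bool
spr_mode_ok (enum spr reg, struct mode m)
{
  if (m.mclass != MC_INT)
    return false;
  if (reg == SPR_VRSAVE || reg == SPR_VSCR)
    return m.size == 4;
  return m.size == pointer_size;   /* CTR and LR.  */
}

int
main (void)
{
  struct mode si = { "SI", MC_INT, 4 };
  struct mode di = { "DI", MC_INT, 8 };
  struct mode sf = { "SF", MC_FLOAT, 4 };
  struct mode cc = { "CC", MC_CC, 4 };

  printf ("VRSAVE:SI %d  CTR:DI %d  CTR:SF %d  LR:CC %d\n",
          spr_mode_ok (SPR_VRSAVE, si), spr_mode_ok (SPR_CTR, di),
          spr_mode_ok (SPR_CTR, sf), spr_mode_ok (SPR_LR, cc));
  return 0;
}

With that restriction in place, the register allocator can never select the *c*l or *h alternatives for CC or floating-point values, so deleting those rows (and the matching mtjmpr/mfjmpr type entries and mt%0/mf%1/nop templates) is a simplification rather than a functional change.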