This pattern needs TARGET_MMX in its insn condition, since it uses MMX registers.
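
For illustration, take -m32 -mno-mmx -msse: the old condition
(TARGET_SSE || TARGET_MMX_WITH_SSE) was still true there, although no
MMX registers are available, while the new condition correctly disables
the pattern.  A minimal stand-alone sketch of the two conditions
(hypothetical flag values modelling that option set, not GCC's real
target macros):

#include <stdbool.h>
#include <stdio.h>

int
main (void)
{
  /* Modelled flags for -m32 -mno-mmx -msse.  TARGET_MMX_WITH_SSE
     (MMX emulated with SSE registers) requires a 64-bit target,
     so it is false here.  */
  bool target_mmx = false;
  bool target_sse = true;
  bool target_mmx_with_sse = false;

  bool old_cond = target_sse || target_mmx_with_sse;
  bool new_cond = (target_mmx || target_mmx_with_sse) && target_sse;

  /* Prints "old: 1, new: 0" -- the old condition wrongly enabled
     the MMX-register pattern.  */
  printf ("old: %d, new: %d\n", old_cond, new_cond);
  return 0;
}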

2019-05-22  Uroš Bizjak  <ubiz...@gmail.com>

    * config/i386/sse.md (sse_cvtpi2ps): Use TARGET_MMX in insn condition.

(+ some trivial code reorg: single-use "insn" temporaries folded into the emit_insn calls, sketched below.)
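
A representative fragment of the reorg, before vs. after (the same
change recurs in each hunk):

  /* Before: temporary bound, then emitted separately.  */
  rtx insn = gen_rtx_SET (operands[0], op);
  emit_insn (insn);

  /* After: emit directly, no temporary.  */
  emit_insn (gen_rtx_SET (operands[0], op));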

Bootstrapped and regression tested on x86_64-linux-gnu {,-m32}.

Committed to mainline SVN.

Uros.
diff --git a/gcc/config/i386/mmx.md b/gcc/config/i386/mmx.md
index adad950fa04d..dc8dabfafc85 100644
--- a/gcc/config/i386/mmx.md
+++ b/gcc/config/i386/mmx.md
@@ -1521,8 +1521,7 @@
 
       op = gen_rtx_VEC_SELECT (V8HImode, operands[1], mask);
     }
-  rtx insn = gen_rtx_SET (operands[0], op);
-  emit_insn (insn);
+  emit_insn (gen_rtx_SET (operands[0], op));
   DONE;
 }
   [(set_attr "mmx_isa" "native,x64,x64_avx")
diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
index 7e7b3417cfc8..7d48402ee9d9 100644
--- a/gcc/config/i386/sse.md
+++ b/gcc/config/i386/sse.md
@@ -5012,7 +5012,7 @@
          (match_operand:V4SF 1 "register_operand" "0,0,Yv")
          (const_int 3)))
    (clobber (match_scratch:V4SF 3 "=X,x,Yv"))]
-  "TARGET_SSE || TARGET_MMX_WITH_SSE"
+  "(TARGET_MMX || TARGET_MMX_WITH_SSE) && TARGET_SSE"
   "@
    cvtpi2ps\t{%2, %0|%0, %2}
    #
@@ -5023,8 +5023,7 @@
   rtx op2 = lowpart_subreg (V4SImode, operands[2],
                            GET_MODE (operands[2]));
   /* Generate SSE2 cvtdq2ps.  */
-  rtx insn = gen_floatv4siv4sf2 (operands[3], op2);
-  emit_insn (insn);
+  emit_insn (gen_floatv4siv4sf2 (operands[3], op2));
 
   /* Merge operands[3] with operands[0].  */
   rtx mask, op1;
@@ -5035,7 +5034,7 @@
                                          GEN_INT (6), GEN_INT (7)));
       op1 = gen_rtx_VEC_CONCAT (V8SFmode, operands[3], operands[1]);
       op2 = gen_rtx_VEC_SELECT (V4SFmode, op1, mask);
-      insn = gen_rtx_SET (operands[0], op2);
+      emit_insn (gen_rtx_SET (operands[0], op2));
     }
   else
     {
@@ -5045,8 +5044,7 @@
                                          GEN_INT (4), GEN_INT (5)));
       op1 = gen_rtx_VEC_CONCAT (V8SFmode, operands[0], operands[3]);
       op2 = gen_rtx_VEC_SELECT (V4SFmode, op1, mask);
-      insn = gen_rtx_SET (operands[0], op2);
-      emit_insn (insn);
+      emit_insn (gen_rtx_SET (operands[0], op2));
 
       /* Swap bits 0:63 with bits 64:127.  */
       mask = gen_rtx_PARALLEL (VOIDmode,
@@ -5055,9 +5053,8 @@
       rtx dest = lowpart_subreg (V4SImode, operands[0],
                                 GET_MODE (operands[0]));
       op1 = gen_rtx_VEC_SELECT (V4SImode, dest, mask);
-      insn = gen_rtx_SET (dest, op1);
+      emit_insn (gen_rtx_SET (dest, op1));
     }
-  emit_insn (insn);
   DONE;
 }
   [(set_attr "mmx_isa" "native,x64_noavx,x64_avx")
@@ -16356,14 +16353,12 @@
   /* Emulate MMX palignrdi with SSE psrldq.  */
   rtx op0 = lowpart_subreg (V2DImode, operands[0],
                            GET_MODE (operands[0]));
-  rtx insn;
   if (TARGET_AVX)
-    insn = gen_vec_concatv2di (op0, operands[2], operands[1]);
+    emit_insn (gen_vec_concatv2di (op0, operands[2], operands[1]));
   else
     {
       /* NB: SSE can only concatenate OP0 and OP1 to OP0.  */
-      insn = gen_vec_concatv2di (op0, operands[1], operands[2]);
-      emit_insn (insn);
+      emit_insn (gen_vec_concatv2di (op0, operands[1], operands[2]));
       /* Swap bits 0:63 with bits 64:127.  */
       rtx mask = gen_rtx_PARALLEL (VOIDmode,
                                   gen_rtvec (4, GEN_INT (2),
@@ -16372,9 +16367,8 @@
                                              GEN_INT (1)));
       rtx op1 = lowpart_subreg (V4SImode, op0, GET_MODE (op0));
       rtx op2 = gen_rtx_VEC_SELECT (V4SImode, op1, mask);
-      insn = gen_rtx_SET (op1, op2);
+      emit_insn (gen_rtx_SET (op1, op2));
     }
-  emit_insn (insn);
   operands[0] = lowpart_subreg (V1TImode, op0, GET_MODE (op0));
 }
   [(set_attr "mmx_isa" "native,x64_noavx,x64_avx")
