https://gcc.gnu.org/g:2b3027bea3f218599d36379d3d593841df7a1559

commit r15-1899-g2b3027bea3f218599d36379d3d593841df7a1559
Author: Uros Bizjak <ubiz...@gmail.com>
Date:   Mon Jul 8 20:47:52 2024 +0200

    i386: Promote {QI,HI}mode x86_mov<mode>cc_0_m1_neg to SImode
    
    Promote the HImode x86_mov<mode>cc_0_m1_neg insn to SImode to avoid the
    redundant operand-size (0x66) prefix.  Also promote the QImode insn when
    TARGET_PROMOTE_QImode is set.  This is similar to the
    promotable_binary_operator splitter, where the result is likewise
    promoted to SImode.
    
    Also correct the insn condition of the splitters that promote NEG and NOT
    to SImode.  The QImode and SImode forms of these instructions are always
    the same size, so there is no need for the optimize_insn_for_size_p bypass.
    
            gcc/ChangeLog:
    
            * config/i386/i386.md (x86_mov<mode>cc_0_m1_neg splitter to SImode):
            New splitter.
            (NEG and NOT splitter to SImode): Remove optimize_insn_for_size_p
            predicate from insn condition.
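
For illustration only (not part of the commit), a minimal C sketch of the kind
of source that can reach the x86_mov<mode>cc_0_m1_neg pattern; the function
name is made up, and whether the compiler actually keeps the operation in
HImode before this splitter runs depends on the compiler version and options:

/* Materialize an all-ones/zero mask from an unsigned compare.  On x86 this
   idiom is typically lowered to cmp + sbb, i.e. a negation of the carry
   flag, which is what x86_mov<mode>cc_0_m1_neg represents.  When the HImode
   form is chosen, the new splitter performs the operation on the full
   32-bit register instead, avoiding the 0x66 operand-size prefix of the
   16-bit encoding.  */
void
store_mask (unsigned int a, unsigned int b, short *dst)
{
  *dst = a < b ? -1 : 0;
}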

Diff:
---
 gcc/config/i386/i386.md | 25 +++++++++++++++++++------
 1 file changed, 19 insertions(+), 6 deletions(-)

diff --git a/gcc/config/i386/i386.md b/gcc/config/i386/i386.md
index b24c4fe58750..214cb2e239ae 100644
--- a/gcc/config/i386/i386.md
+++ b/gcc/config/i386/i386.md
@@ -26576,9 +26576,7 @@
    (clobber (reg:CC FLAGS_REG))]
   "! TARGET_PARTIAL_REG_STALL && reload_completed
    && (GET_MODE (operands[0]) == HImode
-       || (GET_MODE (operands[0]) == QImode
-          && (TARGET_PROMOTE_QImode
-              || optimize_insn_for_size_p ())))"
+       || (GET_MODE (operands[0]) == QImode && TARGET_PROMOTE_QImode))"
   [(parallel [(set (match_dup 0)
                   (neg:SI (match_dup 1)))
              (clobber (reg:CC FLAGS_REG))])]
@@ -26593,15 +26591,30 @@
        (not (match_operand 1 "general_reg_operand")))]
   "! TARGET_PARTIAL_REG_STALL && reload_completed
    && (GET_MODE (operands[0]) == HImode
-       || (GET_MODE (operands[0]) == QImode
-          && (TARGET_PROMOTE_QImode
-              || optimize_insn_for_size_p ())))"
+       || (GET_MODE (operands[0]) == QImode && TARGET_PROMOTE_QImode))"
   [(set (match_dup 0)
        (not:SI (match_dup 1)))]
 {
   operands[0] = gen_lowpart (SImode, operands[0]);
   operands[1] = gen_lowpart (SImode, operands[1]);
 })
+
+(define_split
+  [(set (match_operand 0 "general_reg_operand")
+       (neg (match_operator 1 "ix86_carry_flag_operator"
+             [(reg FLAGS_REG) (const_int 0)])))
+   (clobber (reg:CC FLAGS_REG))]
+  "! TARGET_PARTIAL_REG_STALL && reload_completed
+   && (GET_MODE (operands[0]) == HImode
+       || (GET_MODE (operands[0]) == QImode && TARGET_PROMOTE_QImode))"
+  [(parallel [(set (match_dup 0)
+                  (neg:SI (match_dup 1)))
+             (clobber (reg:CC FLAGS_REG))])]
+{
+  operands[0] = gen_lowpart (SImode, operands[0]);
+  operands[1] = shallow_copy_rtx (operands[1]);
+  PUT_MODE (operands[1], SImode);
+})
 
 ;; RTL Peephole optimizations, run before sched2.  These primarily look to
 ;; transform a complex memory operation into two memory to register operations.
