Give the `define_insn` rules used in lowering `cbranch<mode>4` to RTL
more descriptive and consistent names: from now on, each rule is named
after the AArch64 instruction that it generates, as summarized below.
Also add comments to document each rule.
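
The full set of renames is:

    condjump                                 -> aarch64_bcond
    *compare_condjump<GPI:mode>              -> *aarch64_bcond_wide_imm<GPI:mode>
    aarch64_cb<optab><mode>1                 -> aarch64_cbz<optab><mode>1
    *cb<optab><mode>1                        -> *aarch64_tbz<optab><mode>1
    @aarch64_tb<optab><ALLI:mode><GPI:mode>  -> @aarch64_tbz<optab><ALLI:mode><GPI:mode>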

gcc/ChangeLog:

        * config/aarch64/aarch64.md (condjump): Rename to ...
        (aarch64_bcond): ...here.
        (*compare_condjump<GPI:mode>): Rename to ...
        (*aarch64_bcond_wide_imm<GPI:mode>): ...here.
        (aarch64_cb<optab><mode>1): Rename to ...
        (aarch64_cbz<optab><mode>1): ...here.
        (*cb<optab><mode>1): Rename to ...
        (*aarch64_tbz<optab><mode>1): ...here.
        (@aarch64_tb<optab><ALLI:mode><GPI:mode>): Rename to ...
        (@aarch64_tbz<optab><ALLI:mode><GPI:mode>): ...here.
        (restore_stack_nonlocal): Handle rename.
        (stack_protect_combined_test): Likewise.
        * config/aarch64/aarch64-simd.md (cbranch<mode>4): Likewise.
        * config/aarch64/aarch64-sme.md (aarch64_restore_za): Likewise.
        * config/aarch64/aarch64.cc (aarch64_gen_test_and_branch): Likewise.
---
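As an illustration of where the renamed rules sit, the `cbranch<mode>4`
expander in aarch64-simd.md below ends up emitting a sequence along
these lines for a 128-bit vector compare-and-branch (a sketch only;
register allocation and the label name are illustrative):

    eor     v2.16b, v0.16b, v1.16b   // only for a non-zero RHS (gen_xor<mode>3)
    umaxp   v2.4s, v2.4s, v2.4s      // fold 128 bits down to 64
    fmov    x0, d2                   // DImode lowpart of the reduction
    cmp     x0, #0                   // aarch64_gen_compare_reg sets CC
    b.ne    .Ltaken                  // emitted via gen_aarch64_bcond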
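
Likewise for aarch64_gen_test_and_branch in aarch64.cc (a sketch,
assuming a test of bit 5 of w0): the default path goes through the
renamed `@aarch64_tbz` rule, while -mtrack-speculation has to keep the
CC register up to date and so emits a flag-setting TST followed by a
conditional branch through `aarch64_bcond`:

    tbnz    w0, #5, .Ltaken          // default: @aarch64_tbz<optab> rule

    tst     w0, #32                  // -mtrack-speculation: and3nr_compare0
    b.ne    .Ltaken                  // ... then branch via aarch64_bcond
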
 gcc/config/aarch64/aarch64-simd.md |  2 +-
 gcc/config/aarch64/aarch64-sme.md  |  2 +-
 gcc/config/aarch64/aarch64.cc      |  4 ++--
 gcc/config/aarch64/aarch64.md      | 21 ++++++++++++---------
 4 files changed, 16 insertions(+), 13 deletions(-)

diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
index e2afe87e513..197a5f65f34 100644
--- a/gcc/config/aarch64/aarch64-simd.md
+++ b/gcc/config/aarch64/aarch64-simd.md
@@ -3913,41 +3913,41 @@ (define_expand "vcond_mask_<mode><v_int_equiv>"
 (define_expand "cbranch<mode>4"
   [(set (pc)
         (if_then_else
           (match_operator 0 "aarch64_equality_operator"
             [(match_operand:VDQ_I 1 "register_operand")
              (match_operand:VDQ_I 2 "aarch64_simd_reg_or_zero")])
           (label_ref (match_operand 3 ""))
           (pc)))]
   "TARGET_SIMD"
 {
   auto code = GET_CODE (operands[0]);
   rtx tmp = operands[1];
 
   /* If comparing against a non-zero vector we have to do a comparison first
      so we can have a != 0 comparison with the result.  */
   if (operands[2] != CONST0_RTX (<MODE>mode))
     {
       tmp = gen_reg_rtx (<MODE>mode);
       emit_insn (gen_xor<mode>3 (tmp, operands[1], operands[2]));
     }
 
   /* For 64-bit vectors we need no reductions.  */
   if (known_eq (128, GET_MODE_BITSIZE (<MODE>mode)))
     {
       /* Always reduce using a V4SI.  */
       rtx reduc = gen_lowpart (V4SImode, tmp);
       rtx res = gen_reg_rtx (V4SImode);
       emit_insn (gen_aarch64_umaxpv4si (res, reduc, reduc));
       emit_move_insn (tmp, gen_lowpart (<MODE>mode, res));
     }
 
   rtx val = gen_reg_rtx (DImode);
   emit_move_insn (val, gen_lowpart (DImode, tmp));
 
   rtx cc_reg = aarch64_gen_compare_reg (code, val, const0_rtx);
   rtx cmp_rtx = gen_rtx_fmt_ee (code, DImode, cc_reg, const0_rtx);
-  emit_jump_insn (gen_condjump (cmp_rtx, cc_reg, operands[3]));
+  emit_jump_insn (gen_aarch64_bcond (cmp_rtx, cc_reg, operands[3]));
   DONE;
 })
 
 ;; Patterns comparing two vectors to produce a mask.
diff --git a/gcc/config/aarch64/aarch64-sme.md b/gcc/config/aarch64/aarch64-sme.md
index c49affd0dd3..4e4ac71c5a3 100644
--- a/gcc/config/aarch64/aarch64-sme.md
+++ b/gcc/config/aarch64/aarch64-sme.md
@@ -366,42 +366,42 @@ (define_insn "aarch64_tpidr2_restore"
 ;; Check whether a lazy save set up by aarch64_save_za was committed
 ;; and restore the saved contents if so.
 ;;
 ;; Operand 0 is the address of the current function's TPIDR2 block.
 (define_insn_and_split "aarch64_restore_za"
   [(set (reg:DI ZA_SAVED_REGNUM)
        (unspec:DI [(match_operand 0 "pmode_register_operand" "r")
                    (reg:DI SME_STATE_REGNUM)
                    (reg:DI TPIDR2_SETUP_REGNUM)
                    (reg:DI ZA_SAVED_REGNUM)] UNSPEC_RESTORE_ZA))
    (clobber (reg:DI R0_REGNUM))
    (clobber (reg:DI R14_REGNUM))
    (clobber (reg:DI R15_REGNUM))
    (clobber (reg:DI R16_REGNUM))
    (clobber (reg:DI R17_REGNUM))
    (clobber (reg:DI R18_REGNUM))
    (clobber (reg:DI R30_REGNUM))
    (clobber (reg:CC CC_REGNUM))]
   ""
   "#"
   "&& epilogue_completed"
   [(const_int 0)]
   {
     auto label = gen_label_rtx ();
     auto tpidr2 = gen_rtx_REG (DImode, R16_REGNUM);
     emit_insn (gen_aarch64_read_tpidr2 (tpidr2));
-    auto jump = emit_likely_jump_insn (gen_aarch64_cbnedi1 (tpidr2, label));
+    auto jump = emit_likely_jump_insn (gen_aarch64_cbznedi1 (tpidr2, label));
     JUMP_LABEL (jump) = label;
 
     aarch64_restore_za (operands[0]);
     emit_label (label);
     DONE;
   }
 )
 
 ;; This instruction is emitted after asms that alter ZA, in order to model
 ;; the effect on dataflow.  The asm itself can't have ZA as an input or
 ;; an output, since there is no associated data type.  Instead it retains
 ;; the original "za" clobber, which on its own would indicate that ZA
 ;; is dead.
 ;;
 ;; The operand is a unique identifier.
diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
index fff8d9da49d..b5ac6d3f37e 100644
--- a/gcc/config/aarch64/aarch64.cc
+++ b/gcc/config/aarch64/aarch64.cc
@@ -2872,44 +2872,44 @@ static rtx
 aarch64_gen_test_and_branch (rtx_code code, rtx x, int bitnum,
                             rtx_code_label *label)
 {
   auto mode = GET_MODE (x);
   if (aarch64_track_speculation)
     {
       auto mask = gen_int_mode (HOST_WIDE_INT_1U << bitnum, mode);
       emit_insn (gen_aarch64_and3nr_compare0 (mode, x, mask));
       rtx cc_reg = gen_rtx_REG (CC_NZVmode, CC_REGNUM);
       rtx x = gen_rtx_fmt_ee (code, CC_NZVmode, cc_reg, const0_rtx);
-      return gen_condjump (x, cc_reg, label);
+      return gen_aarch64_bcond (x, cc_reg, label);
     }
-  return gen_aarch64_tb (code, mode, mode,
+  return gen_aarch64_tbz (code, mode, mode,
                         x, gen_int_mode (bitnum, mode), label);
 }
 
 /* Consider the operation:
 
      OPERANDS[0] = CODE (OPERANDS[1], OPERANDS[2]) + OPERANDS[3]
 
    where:
 
    - CODE is [SU]MAX or [SU]MIN
    - OPERANDS[2] and OPERANDS[3] are constant integers
    - OPERANDS[3] is a positive or negative shifted 12-bit immediate
    - all operands have mode MODE
 
    Decide whether it is possible to implement the operation using:
 
      SUBS <tmp>, OPERANDS[1], -OPERANDS[3]
      or
      ADDS <tmp>, OPERANDS[1], OPERANDS[3]
 
    followed by:
 
      <insn> OPERANDS[0], <tmp>, [wx]zr, <cond>
 
    where <insn> is one of CSEL, CSINV or CSINC.  Return true if so.
    If GENERATE_P is true, also update OPERANDS as follows:
 
      OPERANDS[4] = -OPERANDS[3]
      OPERANDS[5] = the rtl condition representing <cond>
      OPERANDS[6] = <tmp>
      OPERANDS[7] = 0 for CSEL, -1 for CSINV or 1 for CSINC.  */
diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
index 7d0af5bd700..1b1e982d466 100644
--- a/gcc/config/aarch64/aarch64.md
+++ b/gcc/config/aarch64/aarch64.md
@@ -740,135 +740,138 @@ (define_expand "cbranchcc4"
   ""
 )
 
-(define_insn "condjump"
+;; Emit `B<cond>`, assuming that the condition is already in the CC register.
+(define_insn "aarch64_bcond"
   [(set (pc) (if_then_else (match_operator 0 "aarch64_comparison_operator"
                            [(match_operand 1 "cc_register")
                             (const_int 0)])
                           (label_ref (match_operand 2))
                           (pc)))]
   ""
   {
     /* GCC's traditional style has been to use "beq" instead of "b.eq", etc.,
        but the "." is required for SVE conditions.  */
     bool use_dot_p = GET_MODE (operands[1]) == CC_NZCmode;
     if (get_attr_length (insn) == 8)
       return aarch64_gen_far_branch (operands, 2, "Lbcond",
                                     use_dot_p ? "b.%M0\\t" : "b%M0\\t");
     else
       return use_dot_p ? "b.%m0\\t%l2" : "b%m0\\t%l2";
   }
   [(set_attr "type" "branch")
    (set (attr "length")
        (if_then_else (and (ge (minus (match_dup 2) (pc)) (const_int -1048576))
                           (lt (minus (match_dup 2) (pc)) (const_int 1048572)))
                      (const_int 4)
                      (const_int 8)))
    (set (attr "far_branch")
        (if_then_else (and (ge (minus (match_dup 2) (pc)) (const_int -1048576))
                           (lt (minus (match_dup 2) (pc)) (const_int 1048572)))
                      (const_int 0)
                      (const_int 1)))]
 )
 
 ;; For a 24-bit immediate CST we can optimize the compare for equality
 ;; and branch sequence from:
 ;;     mov     x0, #imm1
 ;;     movk    x0, #imm2, lsl 16 /* x0 contains CST.  */
 ;;     cmp     x1, x0
 ;;     b<ne,eq> .Label
 ;; into the shorter:
 ;;     sub     x0, x1, #(CST & 0xfff000)
 ;;     subs    x0, x0, #(CST & 0x000fff)
 ;;     b<ne,eq> .Label
-(define_insn_and_split "*compare_condjump<GPI:mode>"
+(define_insn_and_split "*aarch64_bcond_wide_imm<GPI:mode>"
   [(set (pc) (if_then_else (EQL (match_operand:GPI 0 "register_operand" "r")
                                (match_operand:GPI 1 "aarch64_imm24" "n"))
                           (label_ref:P (match_operand 2))
                           (pc)))]
   "!aarch64_move_imm (INTVAL (operands[1]), <GPI:MODE>mode)
    && !aarch64_plus_operand (operands[1], <GPI:MODE>mode)
    && !reload_completed"
   "#"
   "&& true"
   [(const_int 0)]
   {
     HOST_WIDE_INT lo_imm = UINTVAL (operands[1]) & 0xfff;
     HOST_WIDE_INT hi_imm = UINTVAL (operands[1]) & 0xfff000;
     rtx tmp = gen_reg_rtx (<GPI:MODE>mode);
     emit_insn (gen_add<GPI:mode>3 (tmp, operands[0], GEN_INT (-hi_imm)));
     emit_insn (gen_add<GPI:mode>3_compare0 (tmp, tmp, GEN_INT (-lo_imm)));
     rtx cc_reg = gen_rtx_REG (CC_NZmode, CC_REGNUM);
     rtx cmp_rtx = gen_rtx_fmt_ee (<EQL:CMP>, <GPI:MODE>mode,
                                  cc_reg, const0_rtx);
-    emit_jump_insn (gen_condjump (cmp_rtx, cc_reg, operands[2]));
+    emit_jump_insn (gen_aarch64_bcond (cmp_rtx, cc_reg, operands[2]));
     DONE;
   }
 )
 
-(define_insn "aarch64_cb<optab><mode>1"
+;; For an EQ/NE comparison against zero, emit `CBZ`/`CBNZ`.
+(define_insn "aarch64_cbz<optab><mode>1"
   [(set (pc) (if_then_else (EQL (match_operand:GPI 0 "register_operand" "r")
                                (const_int 0))
                           (label_ref (match_operand 1))
                           (pc)))]
   "!aarch64_track_speculation"
   {
     if (get_attr_length (insn) == 8)
       return aarch64_gen_far_branch (operands, 1, "Lcb", "<inv_cb>\\t%<w>0, ");
     else
       return "<cbz>\\t%<w>0, %l1";
   }
   [(set_attr "type" "branch")
    (set (attr "length")
        (if_then_else (and (ge (minus (match_dup 1) (pc)) (const_int -1048576))
                           (lt (minus (match_dup 1) (pc)) (const_int 1048572)))
                      (const_int 4)
                      (const_int 8)))
    (set (attr "far_branch")
        (if_then_else (and (ge (minus (match_dup 2) (pc)) (const_int -1048576))
                           (lt (minus (match_dup 2) (pc)) (const_int 1048572)))
                      (const_int 0)
                      (const_int 1)))]
 )
 
-(define_insn "*cb<optab><mode>1"
+;; For an LT/GE comparison against zero, emit `TBZ`/`TBNZ`.
+(define_insn "*aarch64_tbz<optab><mode>1"
   [(set (pc) (if_then_else (LTGE (match_operand:ALLI 0 "register_operand" "r")
                                 (const_int 0))
                           (label_ref (match_operand 1))
                           (pc)))
    (clobber (reg:CC CC_REGNUM))]
   "!aarch64_track_speculation"
   {
     if (get_attr_length (insn) == 8)
       {
        if (get_attr_far_branch (insn) == 1)
          return aarch64_gen_far_branch (operands, 1, "Ltb",
                                         "<inv_tb>\\t%<w>0, <sizem1>, ");
        else
          {
            char buf[64];
            uint64_t val = ((uint64_t) 1)
                << (GET_MODE_SIZE (<MODE>mode) * BITS_PER_UNIT - 1);
            sprintf (buf, "tst\t%%<w>0, %" PRId64, val);
            output_asm_insn (buf, operands);
            return "<bcond>\t%l1";
          }
       }
     else
       return "<tbz>\t%<w>0, <sizem1>, %l1";
   }
   [(set_attr "type" "branch")
    (set (attr "length")
        (if_then_else (and (ge (minus (match_dup 1) (pc)) (const_int -32768))
                           (lt (minus (match_dup 1) (pc)) (const_int 32764)))
                      (const_int 4)
                      (const_int 8)))
    (set (attr "far_branch")
        (if_then_else (and (ge (minus (match_dup 1) (pc)) (const_int -1048576))
                           (lt (minus (match_dup 1) (pc)) (const_int 1048572)))
                      (const_int 0)
                      (const_int 1)))]
 )
 
 ;; -------------------------------------------------------------------
 ;; Test bit and branch
 ;; -------------------------------------------------------------------
@@ -891,42 +894,42 @@ (define_expand "tbranch_<code><mode>3"
                                         operands[1]);
 })
 
-(define_insn "@aarch64_tb<optab><ALLI:mode><GPI:mode>"
+(define_insn "@aarch64_tbz<optab><ALLI:mode><GPI:mode>"
   [(set (pc) (if_then_else (EQL
                             (zero_extract:GPI
                               (match_operand:ALLI 0 "register_operand" "r")
                               (const_int 1)
               (match_operand 1 "aarch64_simd_shift_imm_<ALLI:mode>" "n"))
                             (const_int 0))
                           (label_ref (match_operand 2))
                           (pc)))
    (clobber (reg:CC CC_REGNUM))]
   "!aarch64_track_speculation"
   {
     if (get_attr_length (insn) == 8)
       {
        if (get_attr_far_branch (insn) == 1)
          return aarch64_gen_far_branch (operands, 2, "Ltb",
                                         "<inv_tb>\\t%<ALLI:w>0, %1, ");
        else
          {
            operands[1] = GEN_INT (HOST_WIDE_INT_1U << UINTVAL (operands[1]));
            return "tst\t%<ALLI:w>0, %1\;<bcond>\t%l2";
          }
       }
     else
       return "<tbz>\t%<ALLI:w>0, %1, %l2";
   }
   [(set_attr "type" "branch")
    (set (attr "length")
        (if_then_else (and (ge (minus (match_dup 2) (pc)) (const_int -32768))
                           (lt (minus (match_dup 2) (pc)) (const_int 32764)))
                      (const_int 4)
                      (const_int 8)))
    (set (attr "far_branch")
        (if_then_else (and (ge (minus (match_dup 2) (pc)) (const_int -1048576))
                           (lt (minus (match_dup 2) (pc)) (const_int 1048572)))
                      (const_int 0)
                      (const_int 1)))]
 
 )
@@ -1287,53 +1290,53 @@ (define_expand "save_stack_nonlocal"
 (define_expand "restore_stack_nonlocal"
   [(set (match_operand 0 "register_operand" "")
        (match_operand 1 "memory_operand" ""))]
   ""
 {
   rtx stack_slot = adjust_address (operands[1], Pmode, 0);
   emit_move_insn (operands[0], stack_slot);
 
   if (aarch64_gcs_enabled ())
     {
       /* Restore GCS with code like
                mov     x16, 1
                chkfeat x16
                tbnz    x16, 0, .L_done
                ldr     tmp1, [%1, 8]
                mrs     tmp2, gcspr_el0
                subs    tmp2, tmp1, tmp2
                b.eq    .L_done
        .L_loop:
                gcspopm
                subs    tmp2, tmp2, 8
                b.ne    .L_loop
        .L_done:  */
 
       rtx loop_label = gen_label_rtx ();
       rtx done_label = gen_label_rtx ();
       rtx r16 = gen_rtx_REG (DImode, R16_REGNUM);
       emit_move_insn (r16, const1_rtx);
       emit_insn (gen_aarch64_chkfeat ());
       emit_insn (gen_tbranch_neqi3 (r16, const0_rtx, done_label));
       rtx gcs_slot = adjust_address (operands[1], Pmode, GET_MODE_SIZE (Pmode));
       rtx gcs_old = gen_reg_rtx (Pmode);
       emit_move_insn (gcs_old, gcs_slot);
       rtx gcs_now = gen_reg_rtx (Pmode);
       emit_insn (gen_aarch64_load_gcspr (gcs_now));
       emit_insn (gen_subdi3_compare1 (gcs_now, gcs_old, gcs_now));
       rtx cc_reg = gen_rtx_REG (CC_NZmode, CC_REGNUM);
       rtx cmp_rtx = gen_rtx_fmt_ee (EQ, DImode, cc_reg, const0_rtx);
-      emit_jump_insn (gen_condjump (cmp_rtx, cc_reg, done_label));
+      emit_jump_insn (gen_aarch64_bcond (cmp_rtx, cc_reg, done_label));
       emit_label (loop_label);
       emit_insn (gen_aarch64_gcspopm_xzr ());
       emit_insn (gen_adddi3_compare0 (gcs_now, gcs_now, GEN_INT (-8)));
       cc_reg = gen_rtx_REG (CC_NZmode, CC_REGNUM);
       cmp_rtx = gen_rtx_fmt_ee (NE, DImode, cc_reg, const0_rtx);
-      emit_jump_insn (gen_condjump (cmp_rtx, cc_reg, loop_label));
+      emit_jump_insn (gen_aarch64_bcond (cmp_rtx, cc_reg, loop_label));
       emit_label (done_label);
     }
   DONE;
 })
 
 ;; -------------------------------------------------------------------
 ;; Subroutine calls and sibcalls
 ;; -------------------------------------------------------------------
@@ -8117,21 +8120,21 @@ (define_expand "stack_protect_test"
 (define_expand "stack_protect_combined_test"
   [(match_operand 0 "memory_operand")
    (match_operand 1 "")
    (match_operand 2)]
   ""
 {
   machine_mode mode = GET_MODE (operands[0]);
   operands[1] = aarch64_stack_protect_canary_mem (mode, operands[1],
                                                  AARCH64_SALT_SSP_TEST);
   emit_insn ((mode == DImode
             ? gen_stack_protect_test_di
             : gen_stack_protect_test_si) (operands[0], operands[1]));
 
   rtx cc_reg = gen_rtx_REG (CCmode, CC_REGNUM);
-  emit_jump_insn (gen_condjump (gen_rtx_EQ (VOIDmode, cc_reg, const0_rtx),
+  emit_jump_insn (gen_aarch64_bcond (gen_rtx_EQ (VOIDmode, cc_reg, const0_rtx),
                                cc_reg, operands[2]));
   DONE;
 })
 
 ;; DO NOT SPLIT THIS PATTERN.  It is important for security reasons that the
 ;; canary value does not live beyond the end of this sequence.
-- 
2.45.2
