Give the `define_insn` rules used in lowering `cbranch<mode>4` to RTL more descriptive and consistent names: from now on, each rule is named after the AArch64 instruction that it generates. Also add comments to document each rule.
gcc/ChangeLog: * config/aarch64/aarch64.md (condjump): Rename to ... (aarch64_bcond): ...here. (*compare_condjump<GPI:mode>): Rename to ... (*aarch64_bcond_wide_imm<GPI:mode>): ...here. (restore_stack_nonlocal): Handle rename. (stack_protect_combined_test): Likewise. * config/aarch64/aarch64-simd.md (cbranch<mode>4): Likewise. * config/aarch64/aarch64-sme.md (aarch64_restore_za): Likewise. * config/aarch64/aarch64.cc (aarch64_gen_test_and_branch): Likewise. --- gcc/config/aarch64/aarch64-simd.md | 2 +- gcc/config/aarch64/aarch64-sme.md | 3 ++- gcc/config/aarch64/aarch64.cc | 2 +- gcc/config/aarch64/aarch64.md | 15 +++++++++------ 4 files changed, 13 insertions(+), 9 deletions(-) diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md index e2afe87e513..197a5f65f34 100644 --- a/gcc/config/aarch64/aarch64-simd.md +++ b/gcc/config/aarch64/aarch64-simd.md @@ -3946,7 +3946,7 @@ (define_expand "cbranch<mode>4" rtx cc_reg = aarch64_gen_compare_reg (code, val, const0_rtx); rtx cmp_rtx = gen_rtx_fmt_ee (code, DImode, cc_reg, const0_rtx); - emit_jump_insn (gen_condjump (cmp_rtx, cc_reg, operands[3])); + emit_jump_insn (gen_aarch64_bcond (cmp_rtx, cc_reg, operands[3])); DONE; }) diff --git a/gcc/config/aarch64/aarch64-sme.md b/gcc/config/aarch64/aarch64-sme.md index c49affd0dd3..6a7c31acf0a 100644 --- a/gcc/config/aarch64/aarch64-sme.md +++ b/gcc/config/aarch64/aarch64-sme.md @@ -389,7 +389,8 @@ (define_insn_and_split "aarch64_restore_za" auto label = gen_label_rtx (); auto tpidr2 = gen_rtx_REG (DImode, R16_REGNUM); emit_insn (gen_aarch64_read_tpidr2 (tpidr2)); - auto jump = emit_likely_jump_insn (gen_aarch64_cbnedi1 (tpidr2, label)); + auto jump = emit_likely_jump_insn ( + gen_aarch64_cbnedi1 (tpidr2, label)); JUMP_LABEL (jump) = label; aarch64_restore_za (operands[0]); diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc index fff8d9da49d..c0afdeb87ee 100644 --- a/gcc/config/aarch64/aarch64.cc +++ 
b/gcc/config/aarch64/aarch64.cc @@ -2879,7 +2879,7 @@ aarch64_gen_test_and_branch (rtx_code code, rtx x, int bitnum, emit_insn (gen_aarch64_and3nr_compare0 (mode, x, mask)); rtx cc_reg = gen_rtx_REG (CC_NZVmode, CC_REGNUM); rtx x = gen_rtx_fmt_ee (code, CC_NZVmode, cc_reg, const0_rtx); - return gen_condjump (x, cc_reg, label); + return gen_aarch64_bcond (x, cc_reg, label); } return gen_aarch64_tb (code, mode, mode, x, gen_int_mode (bitnum, mode), label); diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md index 45b2283c5c0..23775ec58ca 100644 --- a/gcc/config/aarch64/aarch64.md +++ b/gcc/config/aarch64/aarch64.md @@ -740,7 +740,8 @@ (define_expand "cbranchcc4" "" ) -(define_insn "condjump" +;; Emit `B<cond>`, assuming that the condition is already in the CC register. +(define_insn "aarch64_bcond" [(set (pc) (if_then_else (match_operator 0 "aarch64_comparison_operator" [(match_operand 1 "cc_register") (const_int 0)]) @@ -780,7 +781,7 @@ (define_insn "condjump" ;; sub x0, x1, #(CST & 0xfff000) ;; subs x0, x0, #(CST & 0x000fff) ;; b<ne,eq> .Label -(define_insn_and_split "*compare_condjump<GPI:mode>" +(define_insn_and_split "*aarch64_bcond_wide_imm<GPI:mode>" [(set (pc) (if_then_else (EQL (match_operand:GPI 0 "register_operand" "r") (match_operand:GPI 1 "aarch64_imm24" "n")) @@ -801,11 +802,12 @@ rtx cc_reg = gen_rtx_REG (CC_NZmode, CC_REGNUM); rtx cmp_rtx = gen_rtx_fmt_ee (<EQL:CMP>, <GPI:MODE>mode, cc_reg, const0_rtx); - emit_jump_insn (gen_condjump (cmp_rtx, cc_reg, operands[2])); + emit_jump_insn (gen_aarch64_bcond (cmp_rtx, cc_reg, operands[2])); DONE; } ) +;; For an EQ/NE comparison against zero, emit `CBZ`/`CBNZ`. (define_insn "aarch64_cb<optab><mode>1" [(set (pc) (if_then_else (EQL (match_operand:GPI 0 "register_operand" "r") @@ -832,6 +834,7 @@ (define_insn "aarch64_cb<optab><mode>1" (const_int 1)))] ) +;; For an LT/GE comparison against zero, emit `TBZ`/`TBNZ`. (define_insn 
"*cb<optab><mode>1" [(set (pc) (if_then_else (LTGE (match_operand:ALLI 0 "register_operand" "r") @@ -1325,13 +1328,13 @@ (define_expand "restore_stack_nonlocal" emit_insn (gen_subdi3_compare1 (gcs_now, gcs_old, gcs_now)); rtx cc_reg = gen_rtx_REG (CC_NZmode, CC_REGNUM); rtx cmp_rtx = gen_rtx_fmt_ee (EQ, DImode, cc_reg, const0_rtx); - emit_jump_insn (gen_condjump (cmp_rtx, cc_reg, done_label)); + emit_jump_insn (gen_aarch64_bcond (cmp_rtx, cc_reg, done_label)); emit_label (loop_label); emit_insn (gen_aarch64_gcspopm_xzr ()); emit_insn (gen_adddi3_compare0 (gcs_now, gcs_now, GEN_INT (-8))); cc_reg = gen_rtx_REG (CC_NZmode, CC_REGNUM); cmp_rtx = gen_rtx_fmt_ee (NE, DImode, cc_reg, const0_rtx); - emit_jump_insn (gen_condjump (cmp_rtx, cc_reg, loop_label)); + emit_jump_insn (gen_aarch64_bcond (cmp_rtx, cc_reg, loop_label)); emit_label (done_label); } DONE; @@ -8131,7 +8134,7 @@ (define_expand "stack_protect_combined_test" : gen_stack_protect_test_si) (operands[0], operands[1])); rtx cc_reg = gen_rtx_REG (CCmode, CC_REGNUM); - emit_jump_insn (gen_condjump (gen_rtx_EQ (VOIDmode, cc_reg, const0_rtx), + emit_jump_insn (gen_aarch64_bcond (gen_rtx_EQ (VOIDmode, cc_reg, const0_rtx), cc_reg, operands[2])); DONE; }) -- 2.45.2