This patch is part of a series that fixes ambiguous attribute uses in .md files, i.e. cases in which attributes didn't use <ITER:ATTR> to specify an iterator, and in which <ATTR> could have different values depending on the iterator chosen.
No behavioural change except for dropping the unused SVE divide
permutations.  I can self-approve the SVE bits, but OK for the rest?

Richard

2019-07-05  Richard Sandiford  <richard.sandif...@arm.com>

gcc/
    * config/aarch64/aarch64.md (*compare_condjump<mode>)
    (loadwb_pair<GPI:mode>_<P:mode>, loadwb_pair<GPF:mode>_<P:mode>)
    (storewb_pair<GPI:mode>_<P:mode>, storewb_pair<GPF:mode>_<P:mode>)
    (*ands<mode>_compare0): Fix ambiguous uses of .md attributes.
    * config/aarch64/aarch64-simd.md
    (*aarch64_get_lane_extend<GPI:mode><VDQQH:mode>): Likewise.
    (*aarch64_get_lane_zero_extend<GPI:mode><VDQQH:mode>): Likewise.
    * config/aarch64/aarch64-sve.md
    (while_ult<GPI:mode><PRED_ALL:mode>): Likewise.
    (*cond_<optab><mode>_any): Fix SVE_I/SVE_SDI typo.

Index: gcc/config/aarch64/aarch64.md
===================================================================
--- gcc/config/aarch64/aarch64.md	2019-07-03 20:51:55.225747145 +0100
+++ gcc/config/aarch64/aarch64.md	2019-07-05 15:04:46.680794809 +0100
@@ -567,14 +567,14 @@ (define_insn "condjump"
 ;; sub x0, x1, #(CST & 0xfff000)
 ;; subs x0, x0, #(CST & 0x000fff)
 ;; b<ne,eq> .Label
-(define_insn_and_split "*compare_condjump<mode>"
+(define_insn_and_split "*compare_condjump<GPI:mode>"
   [(set (pc) (if_then_else (EQL
			      (match_operand:GPI 0 "register_operand" "r")
			      (match_operand:GPI 1 "aarch64_imm24" "n"))
			   (label_ref:P (match_operand 2 "" ""))
			   (pc)))]
-  "!aarch64_move_imm (INTVAL (operands[1]), <MODE>mode)
-   && !aarch64_plus_operand (operands[1], <MODE>mode)
+  "!aarch64_move_imm (INTVAL (operands[1]), <GPI:MODE>mode)
+   && !aarch64_plus_operand (operands[1], <GPI:MODE>mode)
    && !reload_completed"
   "#"
   "&& true"
@@ -582,11 +582,12 @@ (define_insn_and_split "*compare_condjum
   {
     HOST_WIDE_INT lo_imm = UINTVAL (operands[1]) & 0xfff;
     HOST_WIDE_INT hi_imm = UINTVAL (operands[1]) & 0xfff000;
-    rtx tmp = gen_reg_rtx (<MODE>mode);
-    emit_insn (gen_add<mode>3 (tmp, operands[0], GEN_INT (-hi_imm)));
-    emit_insn (gen_add<mode>3_compare0 (tmp, tmp, GEN_INT (-lo_imm)));
+    rtx tmp = gen_reg_rtx (<GPI:MODE>mode);
+    emit_insn (gen_add<GPI:mode>3 (tmp, operands[0], GEN_INT (-hi_imm)));
+    emit_insn (gen_add<GPI:mode>3_compare0 (tmp, tmp, GEN_INT (-lo_imm)));
     rtx cc_reg = gen_rtx_REG (CC_NZmode, CC_REGNUM);
-    rtx cmp_rtx = gen_rtx_fmt_ee (<EQL:CMP>, <MODE>mode, cc_reg, const0_rtx);
+    rtx cmp_rtx = gen_rtx_fmt_ee (<EQL:CMP>, <GPI:MODE>mode,
+				  cc_reg, const0_rtx);
     emit_jump_insn (gen_condjump (cmp_rtx, cc_reg, operands[2]));
     DONE;
   }
@@ -1505,8 +1506,8 @@ (define_insn "loadwb_pair<GPI:mode>_<P:m
	  (mem:GPI (plus:P (match_dup 1)
		   (match_operand:P 5 "const_int_operand" "n"))))])]
   "INTVAL (operands[5]) == GET_MODE_SIZE (<GPI:MODE>mode)"
-  "ldp\\t%<w>2, %<w>3, [%1], %4"
-  [(set_attr "type" "load_<ldpstp_sz>")]
+  "ldp\\t%<GPI:w>2, %<GPI:w>3, [%1], %4"
+  [(set_attr "type" "load_<GPI:ldpstp_sz>")]
 )
 
 (define_insn "loadwb_pair<GPF:mode>_<P:mode>"
@@ -1520,7 +1521,7 @@ (define_insn "loadwb_pair<GPF:mode>_<P:m
	  (mem:GPF (plus:P (match_dup 1)
		   (match_operand:P 5 "const_int_operand" "n"))))])]
   "INTVAL (operands[5]) == GET_MODE_SIZE (<GPF:MODE>mode)"
-  "ldp\\t%<w>2, %<w>3, [%1], %4"
+  "ldp\\t%<GPF:w>2, %<GPF:w>3, [%1], %4"
   [(set_attr "type" "neon_load1_2reg")]
 )
 
@@ -1553,8 +1554,8 @@ (define_insn "storewb_pair<GPI:mode>_<P:
		    (match_operand:P 5 "const_int_operand" "n")))
	  (match_operand:GPI 3 "register_operand" "r"))])]
   "INTVAL (operands[5]) == INTVAL (operands[4]) + GET_MODE_SIZE (<GPI:MODE>mode)"
-  "stp\\t%<w>2, %<w>3, [%0, %4]!"
-  [(set_attr "type" "store_<ldpstp_sz>")]
+  "stp\\t%<GPI:w>2, %<GPI:w>3, [%0, %4]!"
+  [(set_attr "type" "store_<GPI:ldpstp_sz>")]
 )
 
 (define_insn "storewb_pair<GPF:mode>_<P:mode>"
@@ -1569,7 +1570,7 @@ (define_insn "storewb_pair<GPF:mode>_<P:
		    (match_operand:P 5 "const_int_operand" "n")))
	  (match_operand:GPF 3 "register_operand" "w"))])]
   "INTVAL (operands[5]) == INTVAL (operands[4]) + GET_MODE_SIZE (<GPF:MODE>mode)"
-  "stp\\t%<w>2, %<w>3, [%0, %4]!"
+  "stp\\t%<GPF:w>2, %<GPF:w>3, [%0, %4]!"
   [(set_attr "type" "neon_store1_2reg<q>")]
 )
 
@@ -4782,7 +4783,7 @@ (define_insn "*and<mode>_compare0"
   [(set_attr "type" "alus_imm")]
 )
 
-(define_insn "*ands<mode>_compare0"
+(define_insn "*ands<GPI:mode>_compare0"
   [(set (reg:CC_NZ CC_REGNUM)
	(compare:CC_NZ
	 (zero_extend:GPI (match_operand:SHORT 1 "register_operand" "r"))
Index: gcc/config/aarch64/aarch64-simd.md
===================================================================
--- gcc/config/aarch64/aarch64-simd.md	2019-07-03 20:51:55.225747145 +0100
+++ gcc/config/aarch64/aarch64-simd.md	2019-07-05 15:04:46.676794843 +0100
@@ -3135,30 +3135,31 @@ (define_expand "vcondu<mode><v_cmp_mixed
 (define_insn "*aarch64_get_lane_extend<GPI:mode><VDQQH:mode>"
   [(set (match_operand:GPI 0 "register_operand" "=r")
	(sign_extend:GPI
-	  (vec_select:<VEL>
+	  (vec_select:<VDQQH:VEL>
	    (match_operand:VDQQH 1 "register_operand" "w")
	    (parallel [(match_operand:SI 2 "immediate_operand" "i")]))))]
   "TARGET_SIMD"
   {
-    operands[2] = aarch64_endian_lane_rtx (<MODE>mode, INTVAL (operands[2]));
+    operands[2] = aarch64_endian_lane_rtx (<VDQQH:MODE>mode,
+					   INTVAL (operands[2]));
     return "smov\\t%<GPI:w>0, %1.<VDQQH:Vetype>[%2]";
   }
-  [(set_attr "type" "neon_to_gp<q>")]
-)
-
-(define_insn "*aarch64_get_lane_zero_extend<GPI:mode><VDQQH:mode>"
-  [(set (match_operand:GPI 0 "register_operand" "=r")
-	(zero_extend:GPI
-	  (vec_select:<VEL>
-	    (match_operand:VDQQH 1 "register_operand" "w")
-	    (parallel [(match_operand:SI 2 "immediate_operand" "i")]))))]
-  "TARGET_SIMD"
-  {
-    operands[2] = aarch64_endian_lane_rtx (<VDQQH:MODE>mode,
-					   INTVAL (operands[2]));
-    return "umov\\t%w0, %1.<Vetype>[%2]";
-  }
-  [(set_attr "type" "neon_to_gp<q>")]
+  [(set_attr "type" "neon_to_gp<VDQQH:q>")]
+)
+
+(define_insn "*aarch64_get_lane_zero_extend<GPI:mode><VDQQH:mode>"
+  [(set (match_operand:GPI 0 "register_operand" "=r")
+	(zero_extend:GPI
+	  (vec_select:<VDQQH:VEL>
+	    (match_operand:VDQQH 1 "register_operand" "w")
+	    (parallel [(match_operand:SI 2 "immediate_operand" "i")]))))]
+  "TARGET_SIMD"
+  {
+    operands[2] = aarch64_endian_lane_rtx (<VDQQH:MODE>mode,
+					   INTVAL (operands[2]));
+    return "umov\\t%w0, %1.<VDQQH:Vetype>[%2]";
+  }
+  [(set_attr "type" "neon_to_gp<VDQQH:q>")]
 )
 
 ;; Lane extraction of a value, neither sign nor zero extension
Index: gcc/config/aarch64/aarch64-sve.md
===================================================================
--- gcc/config/aarch64/aarch64-sve.md	2019-07-03 20:51:55.225747145 +0100
+++ gcc/config/aarch64/aarch64-sve.md	2019-07-05 15:04:46.680794809 +0100
@@ -1363,7 +1363,7 @@ (define_insn_and_rewrite "*while_ult<GPI
   ;; don't have an unnecessary PTRUE.
   "&& !CONSTANT_P (operands[1])"
   {
-    operands[1] = CONSTM1_RTX (<MODE>mode);
+    operands[1] = CONSTM1_RTX (<PRED_ALL:MODE>mode);
   }
 )