On Thu, 2018-12-06 at 12:25 +0000, Richard Sandiford wrote:
>
> Since we're looking at the call insns anyway, we could have a hook that
> "jousts" two calls and picks the one that preserves *fewer* registers.
> This would mean that loop produces a single instruction that conservatively
> describes the call-preserved registers. We could then stash that
> instruction in lra_reg instead of the current check_part_clobbered
> boolean.
>
> The hook should by default be a null pointer, so that we can avoid
> the instruction walk on targets that don't need it.
>
> That would mean that LRA would always have a call instruction to hand
> when asking about call-preserved information. So I think we should
> add an insn parameter to targetm.hard_regno_call_part_clobbered,
> with a null insn selecting the defaul behaviour. I know it's
> going to be a pain to update all callers and targets, sorry.
Richard, here is an updated version of this patch. It is not
completely tested yet but I wanted to send this out and make
sure it is what you had in mind and see if you had any comments about
the new target function while I am testing it (including building
some of the other targets).
Steve Ellcey
[email protected]
2019-01-04 Steve Ellcey <[email protected]>
* config/aarch64/aarch64.c (aarch64_simd_call_p): New function.
(aarch64_hard_regno_call_part_clobbered): Add insn argument.
(aarch64_return_call_with_max_clobbers): New function.
(TARGET_RETURN_CALL_WITH_MAX_CLOBBERS): New macro.
* config/avr/avr.c (avr_hard_regno_call_part_clobbered): Add insn
argument.
* config/i386/i386.c (ix86_hard_regno_call_part_clobbered): Ditto.
* config/mips/mips.c (mips_hard_regno_call_part_clobbered): Ditto.
* config/rs6000/rs6000.c (rs6000_hard_regno_call_part_clobbered): Ditto.
* config/s390/s390.c (s390_hard_regno_call_part_clobbered): Ditto.
* cselib.c (cselib_process_insn): Add argument to
targetm.hard_regno_call_part_clobbered call.
	* ira-conflicts.c (ira_build_conflicts): Ditto.
* ira-costs.c (ira_tune_allocno_costs): Ditto.
* lra-constraints.c (inherit_reload_reg): Ditto, plus refactor
return statement.
* lra-int.h (struct lra_reg): Add call_insn field.
* lra-lives.c (check_pseudos_live_through_calls): Add call_insn
argument. Add argument to targetm.hard_regno_call_part_clobbered
call.
(process_bb_lives): Use new target function
targetm.return_call_with_max_clobbers to set call_insn.
Pass call_insn to check_pseudos_live_through_calls.
Set call_insn in lra_reg_info.
* lra.c (initialize_lra_reg_info_element): Set call_insn to NULL.
* regcprop.c (copyprop_hardreg_forward_1): Add argument to
targetm.hard_regno_call_part_clobbered call.
* reginfo.c (choose_hard_reg_mode): Ditto.
* regrename.c (check_new_reg_p): Ditto.
* reload.c (find_equiv_reg): Ditto.
* reload1.c (emit_reload_insns): Ditto.
* sched-deps.c (deps_analyze_insn): Ditto.
* sel-sched.c (init_regs_for_mode): Ditto.
(mark_unavailable_hard_regs): Ditto.
* targhooks.c (default_dwarf_frame_reg_mode): Ditto.
* target.def (hard_regno_call_part_clobbered): Add insn argument.
(return_call_with_max_clobbers): New target function.
* doc/tm.texi: Regenerate.
* doc/tm.texi.in (TARGET_RETURN_CALL_WITH_MAX_CLOBBERS): New hook.
* hooks.c (hook_bool_uint_mode_false): Change to
hook_bool_insn_uint_mode_false.
* hooks.h (hook_bool_uint_mode_false): Ditto.
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index c5036c8..87af31b 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -1565,16 +1565,55 @@ aarch64_reg_save_mode (tree fndecl, unsigned regno)
: (aarch64_simd_decl_p (fndecl) ? E_TFmode : E_DFmode);
}
+/* Return true if the instruction is a call to a SIMD function, false
+ if it is not a SIMD function or if we do not know anything about
+ the function. */
+
+static bool
+aarch64_simd_call_p (rtx_insn *insn)
+{
+ rtx symbol;
+ rtx call;
+ tree fndecl;
+
+ gcc_assert (CALL_P (insn));
+ call = get_call_rtx_from (insn);
+ symbol = XEXP (XEXP (call, 0), 0);
+ if (GET_CODE (symbol) != SYMBOL_REF)
+ return false;
+ fndecl = SYMBOL_REF_DECL (symbol);
+ if (!fndecl)
+ return false;
+
+ return aarch64_simd_decl_p (fndecl);
+}
+
/* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. The callee only saves
the lower 64 bits of a 128-bit register. Tell the compiler the callee
clobbers the top 64 bits when restoring the bottom 64 bits. */
static bool
-aarch64_hard_regno_call_part_clobbered (unsigned int regno, machine_mode mode)
+aarch64_hard_regno_call_part_clobbered (rtx_insn *insn,
+ unsigned int regno,
+ machine_mode mode)
{
+ if (insn && CALL_P (insn) && aarch64_simd_call_p (insn))
+ return false;
return FP_REGNUM_P (regno) && maybe_gt (GET_MODE_SIZE (mode), 8);
}
+/* Implement TARGET_RETURN_CALL_WITH_MAX_CLOBBERS. */
+
+rtx_insn *
+aarch64_return_call_with_max_clobbers (rtx_insn *call_1, rtx_insn *call_2)
+{
+ gcc_assert (CALL_P (call_1));
+ if ((call_2 == NULL_RTX) || aarch64_simd_call_p (call_2))
+ return call_1;
+ else
+ return call_2;
+}
+
/* Implement REGMODE_NATURAL_SIZE. */
poly_uint64
aarch64_regmode_natural_size (machine_mode mode)
@@ -18524,6 +18563,10 @@ aarch64_libgcc_floating_mode_supported_p
#define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
aarch64_hard_regno_call_part_clobbered
+#undef TARGET_RETURN_CALL_WITH_MAX_CLOBBERS
+#define TARGET_RETURN_CALL_WITH_MAX_CLOBBERS \
+ aarch64_return_call_with_max_clobbers
+
#undef TARGET_CONSTANT_ALIGNMENT
#define TARGET_CONSTANT_ALIGNMENT aarch64_constant_alignment
diff --git a/gcc/config/avr/avr.c b/gcc/config/avr/avr.c
index 023308b..2cf993d 100644
--- a/gcc/config/avr/avr.c
+++ b/gcc/config/avr/avr.c
@@ -12181,7 +12181,9 @@ avr_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
/* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. */
static bool
-avr_hard_regno_call_part_clobbered (unsigned regno, machine_mode mode)
+avr_hard_regno_call_part_clobbered (rtx_insn *insn ATTRIBUTE_UNUSED,
+ unsigned regno,
+ machine_mode mode)
{
/* FIXME: This hook gets called with MODE:REGNO combinations that don't
represent valid hard registers like, e.g. HI:29. Returning TRUE
diff --git a/gcc/config/i386/i386.c b/gcc/config/i386/i386.c
index 016d6e3..78dc720 100644
--- a/gcc/config/i386/i386.c
+++ b/gcc/config/i386/i386.c
@@ -40161,7 +40161,9 @@ ix86_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
the low 16 bytes are saved. */
static bool
-ix86_hard_regno_call_part_clobbered (unsigned int regno, machine_mode mode)
+ix86_hard_regno_call_part_clobbered (rtx_insn *insn ATTRIBUTE_UNUSED,
+ unsigned int regno,
+ machine_mode mode)
{
return SSE_REGNO_P (regno) && GET_MODE_SIZE (mode) > 16;
}
diff --git a/gcc/config/mips/mips.c b/gcc/config/mips/mips.c
index 95dc946..05a2ade 100644
--- a/gcc/config/mips/mips.c
+++ b/gcc/config/mips/mips.c
@@ -12906,7 +12906,9 @@ mips_hard_regno_scratch_ok (unsigned int regno)
registers with MODE > 64 bits are part clobbered too. */
static bool
-mips_hard_regno_call_part_clobbered (unsigned int regno, machine_mode mode)
+mips_hard_regno_call_part_clobbered (rtx_insn *insn ATTRIBUTE_UNUSED,
+ unsigned int regno,
+ machine_mode mode)
{
if (TARGET_FLOATXX
&& hard_regno_nregs (regno, mode) == 1
diff --git a/gcc/config/rs6000/rs6000.c b/gcc/config/rs6000/rs6000.c
index a257554..6d10d24 100644
--- a/gcc/config/rs6000/rs6000.c
+++ b/gcc/config/rs6000/rs6000.c
@@ -2197,7 +2197,9 @@ rs6000_modes_tieable_p (machine_mode mode1, machine_mode mode2)
/* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED. */
static bool
-rs6000_hard_regno_call_part_clobbered (unsigned int regno, machine_mode mode)
+rs6000_hard_regno_call_part_clobbered (rtx_insn *insn ATTRIBUTE_UNUSED,
+ unsigned int regno,
+ machine_mode mode)
{
if (TARGET_32BIT
&& TARGET_POWERPC64
diff --git a/gcc/config/s390/s390.c b/gcc/config/s390/s390.c
index ea2be10..5f941d9 100644
--- a/gcc/config/s390/s390.c
+++ b/gcc/config/s390/s390.c
@@ -10098,7 +10098,9 @@ s390_hard_regno_scratch_ok (unsigned int regno)
bytes are saved across calls, however. */
static bool
-s390_hard_regno_call_part_clobbered (unsigned int regno, machine_mode mode)
+s390_hard_regno_call_part_clobbered (rtx_insn *insn ATTRIBUTE_UNUSED,
+ unsigned int regno,
+ machine_mode mode)
{
if (!TARGET_64BIT
&& TARGET_ZARCH
diff --git a/gcc/cselib.c b/gcc/cselib.c
index cef4bc0..84c17c2 100644
--- a/gcc/cselib.c
+++ b/gcc/cselib.c
@@ -2770,7 +2770,7 @@ cselib_process_insn (rtx_insn *insn)
if (call_used_regs[i]
|| (REG_VALUES (i) && REG_VALUES (i)->elt
&& (targetm.hard_regno_call_part_clobbered
- (i, GET_MODE (REG_VALUES (i)->elt->val_rtx)))))
+ (insn, i, GET_MODE (REG_VALUES (i)->elt->val_rtx)))))
cselib_invalidate_regno (i, reg_raw_mode[i]);
/* Since it is not clear how cselib is going to be used, be
diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi
index edc0902..07171e5 100644
--- a/gcc/doc/tm.texi
+++ b/gcc/doc/tm.texi
@@ -1894,7 +1894,7 @@ of @code{CALL_USED_REGISTERS}.
@cindex call-used register
@cindex call-clobbered register
@cindex call-saved register
-@deftypefn {Target Hook} bool TARGET_HARD_REGNO_CALL_PART_CLOBBERED (unsigned int @var{regno}, machine_mode @var{mode})
+@deftypefn {Target Hook} bool TARGET_HARD_REGNO_CALL_PART_CLOBBERED (rtx_insn *@var{insn}, unsigned int @var{regno}, machine_mode @var{mode})
This hook should return true if @var{regno} is partly call-saved and
partly call-clobbered, and if a value of mode @var{mode} would be partly
clobbered by a call. For example, if the low 32 bits of @var{regno} are
@@ -1905,6 +1905,17 @@ The default implementation returns false, which is correct
for targets that don't have partly call-clobbered registers.
@end deftypefn
+@deftypefn {Target Hook} {rtx_insn *} TARGET_RETURN_CALL_WITH_MAX_CLOBBERS (rtx_insn *@var{call_1}, rtx_insn *@var{call_2})
+This hook returns a pointer to the call that partially clobbers the
+most registers. If a platform supports multiple ABIs where the registers
+that are partially clobbered may vary, this hook compares two
+calls and returns a pointer to the one that clobbers the most registers.
+
+The registers clobbered in different ABIs must be a proper subset or
+superset of all other ABIs. @var{call_1} must always be a call insn,
+@var{call_2} may be @code{NULL} or a call insn.
+@end deftypefn
+
@findex fixed_regs
@findex call_used_regs
@findex global_regs
@@ -2919,7 +2930,7 @@ the local anchor could be shared by other accesses to nearby locations.
The hook returns true if it succeeds, storing the offset of the
anchor from the base in @var{offset1} and the offset of the final address
-from the anchor in @var{offset2}. The default implementation returns false.
+from the anchor in @var{offset2}. The default implementation returns false.
@end deftypefn
@deftypefn {Target Hook} reg_class_t TARGET_SPILL_CLASS (reg_class_t, @var{machine_mode})
diff --git a/gcc/doc/tm.texi.in b/gcc/doc/tm.texi.in
index 976a700..97a2ade 100644
--- a/gcc/doc/tm.texi.in
+++ b/gcc/doc/tm.texi.in
@@ -1707,6 +1707,8 @@ of @code{CALL_USED_REGISTERS}.
@cindex call-saved register
@hook TARGET_HARD_REGNO_CALL_PART_CLOBBERED
+@hook TARGET_RETURN_CALL_WITH_MAX_CLOBBERS
+
@findex fixed_regs
@findex call_used_regs
@findex global_regs
diff --git a/gcc/hooks.c b/gcc/hooks.c
index bbc35fc..f95659b 100644
--- a/gcc/hooks.c
+++ b/gcc/hooks.c
@@ -142,7 +142,7 @@ hook_bool_puint64_puint64_true (poly_uint64, poly_uint64)
-/* Generic hook that takes (unsigned int, machine_mode) and returns false. */
+/* Generic hook that takes (rtx_insn *, unsigned int, machine_mode) and returns false. */
bool
-hook_bool_uint_mode_false (unsigned int, machine_mode)
+hook_bool_insn_uint_mode_false (rtx_insn *, unsigned int, machine_mode)
{
return false;
}
diff --git a/gcc/hooks.h b/gcc/hooks.h
index 9e4bc29..dc6b2e1 100644
--- a/gcc/hooks.h
+++ b/gcc/hooks.h
@@ -40,7 +40,9 @@ extern bool hook_bool_const_rtx_insn_const_rtx_insn_true (const rtx_insn *,
extern bool hook_bool_mode_uhwi_false (machine_mode,
unsigned HOST_WIDE_INT);
extern bool hook_bool_puint64_puint64_true (poly_uint64, poly_uint64);
-extern bool hook_bool_uint_mode_false (unsigned int, machine_mode);
+extern bool hook_bool_insn_uint_mode_false (rtx_insn *,
+ unsigned int,
+ machine_mode);
extern bool hook_bool_uint_mode_true (unsigned int, machine_mode);
extern bool hook_bool_tree_false (tree);
extern bool hook_bool_const_tree_false (const_tree);
diff --git a/gcc/ira-conflicts.c b/gcc/ira-conflicts.c
index b57468b..b697e57 100644
--- a/gcc/ira-conflicts.c
+++ b/gcc/ira-conflicts.c
@@ -808,7 +808,8 @@ ira_build_conflicts (void)
regs must conflict with them. */
for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
if (!TEST_HARD_REG_BIT (call_used_reg_set, regno)
- && targetm.hard_regno_call_part_clobbered (regno,
+ && targetm.hard_regno_call_part_clobbered (NULL,
+ regno,
obj_mode))
{
SET_HARD_REG_BIT (OBJECT_CONFLICT_HARD_REGS (obj), regno);
diff --git a/gcc/ira-costs.c b/gcc/ira-costs.c
index e5d8804..7f60712 100644
--- a/gcc/ira-costs.c
+++ b/gcc/ira-costs.c
@@ -2379,7 +2379,8 @@ ira_tune_allocno_costs (void)
*crossed_calls_clobber_regs)
&& (ira_hard_reg_set_intersection_p (regno, mode,
call_used_reg_set)
- || targetm.hard_regno_call_part_clobbered (regno,
+ || targetm.hard_regno_call_part_clobbered (NULL,
+ regno,
mode)))
cost += (ALLOCNO_CALL_FREQ (a)
* (ira_memory_move_cost[mode][rclass][0]
diff --git a/gcc/lra-constraints.c b/gcc/lra-constraints.c
index 7ffcd35..31a567a 100644
--- a/gcc/lra-constraints.c
+++ b/gcc/lra-constraints.c
@@ -5368,16 +5368,24 @@ inherit_reload_reg (bool def_p, int original_regno,
static inline bool
need_for_call_save_p (int regno)
{
+ machine_mode pmode = PSEUDO_REGNO_MODE (regno);
+ int new_regno = reg_renumber[regno];
+
lra_assert (regno >= FIRST_PSEUDO_REGISTER && reg_renumber[regno] >= 0);
- return (usage_insns[regno].calls_num < calls_num
- && (overlaps_hard_reg_set_p
- ((flag_ipa_ra &&
- ! hard_reg_set_empty_p (lra_reg_info[regno].actual_call_used_reg_set))
- ? lra_reg_info[regno].actual_call_used_reg_set
- : call_used_reg_set,
- PSEUDO_REGNO_MODE (regno), reg_renumber[regno])
- || (targetm.hard_regno_call_part_clobbered
- (reg_renumber[regno], PSEUDO_REGNO_MODE (regno)))));
+
+ if (usage_insns[regno].calls_num >= calls_num)
+ return false;
+
+ if (flag_ipa_ra
+ && !hard_reg_set_empty_p (lra_reg_info[regno].actual_call_used_reg_set))
+ return (overlaps_hard_reg_set_p
+ (lra_reg_info[regno].actual_call_used_reg_set, pmode, new_regno)
+ || targetm.hard_regno_call_part_clobbered
+ (lra_reg_info[regno].call_insn, new_regno, pmode));
+ else
+ return (overlaps_hard_reg_set_p (call_used_reg_set, pmode, new_regno)
+ || targetm.hard_regno_call_part_clobbered
+ (lra_reg_info[regno].call_insn, new_regno, pmode));
}
/* Global registers occurring in the current EBB. */
diff --git a/gcc/lra-int.h b/gcc/lra-int.h
index 9d9e81d..ccc7b00 100644
--- a/gcc/lra-int.h
+++ b/gcc/lra-int.h
@@ -117,6 +117,8 @@ struct lra_reg
/* This member is set up in lra-lives.c for subsequent
assignments. */
lra_copy_t copies;
+ /* Call instruction that may affect this register. */
+ rtx_insn *call_insn;
};
/* References to the common info about each register. */
diff --git a/gcc/lra-lives.c b/gcc/lra-lives.c
index 7b60691..fafb9e3 100644
--- a/gcc/lra-lives.c
+++ b/gcc/lra-lives.c
@@ -579,7 +579,8 @@ lra_setup_reload_pseudo_preferenced_hard_reg (int regno,
PSEUDOS_LIVE_THROUGH_CALLS and PSEUDOS_LIVE_THROUGH_SETJUMPS. */
static inline void
check_pseudos_live_through_calls (int regno,
- HARD_REG_SET last_call_used_reg_set)
+ HARD_REG_SET last_call_used_reg_set,
+ rtx_insn *call_insn)
{
int hr;
@@ -590,7 +591,8 @@ check_pseudos_live_through_calls (int regno,
last_call_used_reg_set);
for (hr = 0; HARD_REGISTER_NUM_P (hr); hr++)
- if (targetm.hard_regno_call_part_clobbered (hr,
+ if (targetm.hard_regno_call_part_clobbered (call_insn,
+ hr,
PSEUDO_REGNO_MODE (regno)))
add_to_hard_reg_set (&lra_reg_info[regno].conflict_hard_regs,
PSEUDO_REGNO_MODE (regno), hr);
@@ -635,6 +637,7 @@ process_bb_lives (basic_block bb, int &curr_point, bool dead_insn_p)
rtx link, *link_loc;
bool need_curr_point_incr;
HARD_REG_SET last_call_used_reg_set;
+ rtx_insn *call_insn;
reg_live_out = df_get_live_out (bb);
sparseset_clear (pseudos_live);
@@ -658,6 +661,17 @@ process_bb_lives (basic_block bb, int &curr_point, bool dead_insn_p)
if (lra_dump_file != NULL)
fprintf (lra_dump_file, " BB %d\n", bb->index);
+ call_insn = NULL;
+ if (targetm.return_call_with_max_clobbers)
+ {
+ FOR_BB_INSNS_REVERSE_SAFE (bb, curr_insn, next)
+ {
+ if (CALL_P (curr_insn))
+ call_insn = targetm.return_call_with_max_clobbers (curr_insn,
+ call_insn);
+ }
+ }
+
/* Scan the code of this basic block, noting which pseudos and hard
regs are born or die.
@@ -847,7 +861,8 @@ process_bb_lives (basic_block bb, int &curr_point, bool dead_insn_p)
update_pseudo_point (reg->regno, curr_point, USE_POINT);
mark_regno_live (reg->regno, reg->biggest_mode);
check_pseudos_live_through_calls (reg->regno,
- last_call_used_reg_set);
+ last_call_used_reg_set,
+ call_insn);
}
if (!HARD_REGISTER_NUM_P (reg->regno))
@@ -912,9 +927,13 @@ process_bb_lives (basic_block bb, int &curr_point, bool dead_insn_p)
{
IOR_HARD_REG_SET (lra_reg_info[j].actual_call_used_reg_set,
this_call_used_reg_set);
+
+ lra_reg_info[j].call_insn = curr_insn;
+
if (flush)
- check_pseudos_live_through_calls
- (j, last_call_used_reg_set);
+ check_pseudos_live_through_calls (j,
+ last_call_used_reg_set,
+ call_insn);
}
COPY_HARD_REG_SET(last_call_used_reg_set, this_call_used_reg_set);
}
@@ -956,7 +975,8 @@ process_bb_lives (basic_block bb, int &curr_point, bool dead_insn_p)
update_pseudo_point (reg->regno, curr_point, USE_POINT);
mark_regno_live (reg->regno, reg->biggest_mode);
check_pseudos_live_through_calls (reg->regno,
- last_call_used_reg_set);
+ last_call_used_reg_set,
+ call_insn);
}
for (reg = curr_static_id->hard_regs; reg != NULL; reg = reg->next)
@@ -1125,7 +1145,7 @@ process_bb_lives (basic_block bb, int &curr_point, bool dead_insn_p)
if (sparseset_cardinality (pseudos_live_through_calls) == 0)
break;
if (sparseset_bit_p (pseudos_live_through_calls, j))
- check_pseudos_live_through_calls (j, last_call_used_reg_set);
+ check_pseudos_live_through_calls (j, last_call_used_reg_set, call_insn);
}
for (i = 0; HARD_REGISTER_NUM_P (i); ++i)
diff --git a/gcc/lra.c b/gcc/lra.c
index 75ee742..b0e999f 100644
--- a/gcc/lra.c
+++ b/gcc/lra.c
@@ -1344,6 +1344,7 @@ initialize_lra_reg_info_element (int i)
lra_reg_info[i].val = get_new_reg_value ();
lra_reg_info[i].offset = 0;
lra_reg_info[i].copies = NULL;
+ lra_reg_info[i].call_insn = NULL;
}
/* Initialize common reg info and copies. */
diff --git a/gcc/regcprop.c b/gcc/regcprop.c
index b107ea2..e6bdeb0 100644
--- a/gcc/regcprop.c
+++ b/gcc/regcprop.c
@@ -1054,7 +1054,7 @@ copyprop_hardreg_forward_1 (basic_block bb, struct value_data *vd)
for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
if ((TEST_HARD_REG_BIT (regs_invalidated_by_this_call, regno)
|| (targetm.hard_regno_call_part_clobbered
- (regno, vd->e[regno].mode)))
+ (insn, regno, vd->e[regno].mode)))
&& (regno < set_regno || regno >= set_regno + set_nregs))
kill_value_regno (regno, 1, vd);
diff --git a/gcc/reginfo.c b/gcc/reginfo.c
index 7a7fa4d..315c5ec 100644
--- a/gcc/reginfo.c
+++ b/gcc/reginfo.c
@@ -639,7 +639,7 @@ choose_hard_reg_mode (unsigned int regno ATTRIBUTE_UNUSED,
if (hard_regno_nregs (regno, mode) == nregs
&& targetm.hard_regno_mode_ok (regno, mode)
&& (!call_saved
- || !targetm.hard_regno_call_part_clobbered (regno, mode))
+ || !targetm.hard_regno_call_part_clobbered (NULL, regno, mode))
&& maybe_gt (GET_MODE_SIZE (mode), GET_MODE_SIZE (found_mode)))
found_mode = mode;
@@ -647,7 +647,7 @@ choose_hard_reg_mode (unsigned int regno ATTRIBUTE_UNUSED,
if (hard_regno_nregs (regno, mode) == nregs
&& targetm.hard_regno_mode_ok (regno, mode)
&& (!call_saved
- || !targetm.hard_regno_call_part_clobbered (regno, mode))
+ || !targetm.hard_regno_call_part_clobbered (NULL, regno, mode))
&& maybe_gt (GET_MODE_SIZE (mode), GET_MODE_SIZE (found_mode)))
found_mode = mode;
@@ -655,7 +655,7 @@ choose_hard_reg_mode (unsigned int regno ATTRIBUTE_UNUSED,
if (hard_regno_nregs (regno, mode) == nregs
&& targetm.hard_regno_mode_ok (regno, mode)
&& (!call_saved
- || !targetm.hard_regno_call_part_clobbered (regno, mode))
+ || !targetm.hard_regno_call_part_clobbered (NULL, regno, mode))
&& maybe_gt (GET_MODE_SIZE (mode), GET_MODE_SIZE (found_mode)))
found_mode = mode;
@@ -663,7 +663,7 @@ choose_hard_reg_mode (unsigned int regno ATTRIBUTE_UNUSED,
if (hard_regno_nregs (regno, mode) == nregs
&& targetm.hard_regno_mode_ok (regno, mode)
&& (!call_saved
- || !targetm.hard_regno_call_part_clobbered (regno, mode))
+ || !targetm.hard_regno_call_part_clobbered (NULL, regno, mode))
&& maybe_gt (GET_MODE_SIZE (mode), GET_MODE_SIZE (found_mode)))
found_mode = mode;
@@ -677,7 +677,7 @@ choose_hard_reg_mode (unsigned int regno ATTRIBUTE_UNUSED,
if (hard_regno_nregs (regno, mode) == nregs
&& targetm.hard_regno_mode_ok (regno, mode)
&& (!call_saved
- || !targetm.hard_regno_call_part_clobbered (regno, mode)))
+ || !targetm.hard_regno_call_part_clobbered (NULL, regno, mode)))
return mode;
}
diff --git a/gcc/regrename.c b/gcc/regrename.c
index a180ced..109add0 100644
--- a/gcc/regrename.c
+++ b/gcc/regrename.c
@@ -339,9 +339,9 @@ check_new_reg_p (int reg ATTRIBUTE_UNUSED, int new_reg,
&& ! DEBUG_INSN_P (tmp->insn))
|| (this_head->need_caller_save_reg
&& ! (targetm.hard_regno_call_part_clobbered
- (reg, GET_MODE (*tmp->loc)))
+ (tmp->insn, reg, GET_MODE (*tmp->loc)))
&& (targetm.hard_regno_call_part_clobbered
- (new_reg, GET_MODE (*tmp->loc)))))
+ (tmp->insn, new_reg, GET_MODE (*tmp->loc)))))
return false;
return true;
diff --git a/gcc/reload.c b/gcc/reload.c
index 6cfd5e2..0cc82d0 100644
--- a/gcc/reload.c
+++ b/gcc/reload.c
@@ -6912,13 +6912,16 @@ find_equiv_reg (rtx goal, rtx_insn *insn, enum reg_class rclass, int other,
if (regno >= 0 && regno < FIRST_PSEUDO_REGISTER)
for (i = 0; i < nregs; ++i)
if (call_used_regs[regno + i]
- || targetm.hard_regno_call_part_clobbered (regno + i, mode))
+ || targetm.hard_regno_call_part_clobbered (p,
+ regno + i,
+ mode))
return 0;
if (valueno >= 0 && valueno < FIRST_PSEUDO_REGISTER)
for (i = 0; i < valuenregs; ++i)
if (call_used_regs[valueno + i]
- || targetm.hard_regno_call_part_clobbered (valueno + i,
+ || targetm.hard_regno_call_part_clobbered (p,
+ valueno + i,
mode))
return 0;
}
diff --git a/gcc/reload1.c b/gcc/reload1.c
index b703402..5490ae5 100644
--- a/gcc/reload1.c
+++ b/gcc/reload1.c
@@ -8289,7 +8289,8 @@ emit_reload_insns (struct insn_chain *chain)
: out_regno + k);
reg_reloaded_insn[regno + k] = insn;
SET_HARD_REG_BIT (reg_reloaded_valid, regno + k);
- if (targetm.hard_regno_call_part_clobbered (regno + k,
+ if (targetm.hard_regno_call_part_clobbered (insn,
+ regno + k,
mode))
SET_HARD_REG_BIT (reg_reloaded_call_part_clobbered,
regno + k);
@@ -8369,7 +8370,8 @@ emit_reload_insns (struct insn_chain *chain)
: in_regno + k);
reg_reloaded_insn[regno + k] = insn;
SET_HARD_REG_BIT (reg_reloaded_valid, regno + k);
- if (targetm.hard_regno_call_part_clobbered (regno + k,
+ if (targetm.hard_regno_call_part_clobbered (insn,
+ regno + k,
mode))
SET_HARD_REG_BIT (reg_reloaded_call_part_clobbered,
regno + k);
@@ -8485,7 +8487,7 @@ emit_reload_insns (struct insn_chain *chain)
CLEAR_HARD_REG_BIT (reg_reloaded_dead, src_regno + k);
SET_HARD_REG_BIT (reg_reloaded_valid, src_regno + k);
if (targetm.hard_regno_call_part_clobbered
- (src_regno + k, mode))
+ (insn, src_regno + k, mode))
SET_HARD_REG_BIT (reg_reloaded_call_part_clobbered,
src_regno + k);
else
diff --git a/gcc/sched-deps.c b/gcc/sched-deps.c
index e15cf08..53c2e26 100644
--- a/gcc/sched-deps.c
+++ b/gcc/sched-deps.c
@@ -3728,7 +3728,8 @@ deps_analyze_insn (struct deps_desc *deps, rtx_insn *insn)
Since we only have a choice between 'might be clobbered'
and 'definitely not clobbered', we must include all
partly call-clobbered registers here. */
- else if (targetm.hard_regno_call_part_clobbered (i,
+ else if (targetm.hard_regno_call_part_clobbered (insn,
+ i,
reg_raw_mode[i])
|| TEST_HARD_REG_BIT (regs_invalidated_by_call, i))
SET_REGNO_REG_SET (reg_pending_clobbers, i);
diff --git a/gcc/sel-sched.c b/gcc/sel-sched.c
index 2bae6ef..c6b4593 100644
--- a/gcc/sel-sched.c
+++ b/gcc/sel-sched.c
@@ -1102,7 +1102,7 @@ init_regs_for_mode (machine_mode mode)
if (i >= 0)
continue;
- if (targetm.hard_regno_call_part_clobbered (cur_reg, mode))
+ if (targetm.hard_regno_call_part_clobbered (NULL, cur_reg, mode))
SET_HARD_REG_BIT (sel_hrd.regs_for_call_clobbered[mode],
cur_reg);
@@ -1251,7 +1251,7 @@ mark_unavailable_hard_regs (def_t def, struct reg_rename *reg_rename_p,
/* Exclude registers that are partially call clobbered. */
if (def->crosses_call
- && !targetm.hard_regno_call_part_clobbered (regno, mode))
+ && !targetm.hard_regno_call_part_clobbered (NULL, regno, mode))
AND_COMPL_HARD_REG_SET (reg_rename_p->available_for_renaming,
sel_hrd.regs_for_call_clobbered[mode]);
diff --git a/gcc/target.def b/gcc/target.def
index e8f0f70..ecb0ea7 100644
--- a/gcc/target.def
+++ b/gcc/target.def
@@ -5772,8 +5772,21 @@ return true for a 64-bit mode but false for a 32-bit mode.\n\
\n\
The default implementation returns false, which is correct\n\
for targets that don't have partly call-clobbered registers.",
- bool, (unsigned int regno, machine_mode mode),
- hook_bool_uint_mode_false)
+ bool, (rtx_insn *insn, unsigned int regno, machine_mode mode),
+ hook_bool_insn_uint_mode_false)
+
+DEFHOOK
+(return_call_with_max_clobbers,
+ "This hook returns a pointer to the call that partially clobbers the\n\
+most registers. If a platform supports multiple ABIs where the registers\n\
+that are partially clobbered may vary, this hook compares two\n\
+calls and returns a pointer to the one that clobbers the most registers.\n\
+\n\
+The registers clobbered in different ABIs must be a proper subset or\n\
+superset of all other ABIs. @var{call_1} must always be a call insn,\n\
+@var{call_2} may be @code{NULL} or a call insn.",
+ rtx_insn *, (rtx_insn *call_1, rtx_insn *call_2),
+ NULL)
/* Return the smallest number of different values for which it is best to
use a jump-table instead of a tree of conditional branches. */
diff --git a/gcc/targhooks.c b/gcc/targhooks.c
index 898848f..2cbdc4a 100644
--- a/gcc/targhooks.c
+++ b/gcc/targhooks.c
@@ -1930,7 +1930,7 @@ default_dwarf_frame_reg_mode (int regno)
{
machine_mode save_mode = reg_raw_mode[regno];
- if (targetm.hard_regno_call_part_clobbered (regno, save_mode))
+ if (targetm.hard_regno_call_part_clobbered (NULL, regno, save_mode))
save_mode = choose_hard_reg_mode (regno, 1, true);
return save_mode;
}