The padding bits of _BitInt(N) are undefined in the x86-64 and
AArch64 psABIs.  Accordingly, the current lowering and expansion
code truncates _BitInt values when taking them as input, while
leaving the output as-is.  By adding truncation on the output side
as well, we can define psABIs (e.g. what we are planning for
LoongArch) that have the padding bits of _BitInt(N) extended.
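As an illustration of the ABI semantics (a sketch, not code from
this patch), consider a _BitInt(3) object stored in a single byte:

    _BitInt(3) x = -2;  /* value bits: 0b110 */
    /* x86-64/AArch64 (undefined padding): the upper five bits of
       the byte may hold anything.  */
    /* An "extended" ABI such as the one planned for LoongArch:
       the byte must hold the sign extension, i.e. 0b11111110.  */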
In addition, TARGET_PROMOTE_FUNCTION_MODE and PROMOTE_MODE are now
expected to work for small _BitInt types, so that they behave just
like the traditional integral types of C when passed around in
registers.
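For example (illustrative only, not part of this patch), on a
target whose PROMOTE_MODE widens sub-word integers:

    int f (signed char c);  /* c arrives sign-extended in a word */
    int g (_BitInt(8) b);   /* b can now be promoted the same way
                               on an "extended" target */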
gcc/c-family/ChangeLog:
* c-common.cc (resolve_overloaded_atomic_exchange): Truncate
_BitInt values before atomic store.
(resolve_overloaded_atomic_compare_exchange): Same.
gcc/ChangeLog:
* explow.cc (promote_function_mode): Allow _BitInt types
to be promoted.
(promote_mode): Same.
* expr.cc (expand_expr_real_1): Do not truncate _BitInts
at ABI boundaries if the target sets the "extended" flag.
(EXTEND_BITINT): Same.
* gimple-lower-bitint.cc (struct bitint_large_huge): Declare
handle_plus_minus_1, add the m_force_extend member, and add a
defaulted bool parameter to the limb_access_type declaration.
(bitint_large_huge::limb_access_type): Access the highest-order
limb of a large/huge _BitInt using the limb type rather than a new
type with reduced precision if _BitInt(N) is extended by
definition.
(bitint_large_huge::handle_plus_minus_1): New function, split out
from handle_plus_minus.
(bitint_large_huge::handle_plus_minus): Truncate the result after
the operation if needed.
(bitint_large_huge::handle_lshift): Same.
(bitint_large_huge::handle_cast): Same.
(bitint_large_huge::handle_load): Same.
(bitint_large_huge::handle_stmt): Same.
(bitint_large_huge::lower_mergeable_stmt): Same.
(bitint_large_huge::lower_shift_stmt): Same.
(bitint_large_huge::lower_muldiv_stmt): Same.
(bitint_large_huge::lower_float_conv_stmt): Same.
(bitint_large_huge::finish_arith_overflow): Same.
(bitint_large_huge::lower_addsub_overflow): Same.
(bitint_large_huge::lower_stmt): Same.
---
gcc/c-family/c-common.cc | 65 ++++++-
gcc/explow.cc | 32 +++-
gcc/expr.cc | 23 ++-
gcc/gimple-lower-bitint.cc | 365 ++++++++++++++++++++++++++++++++++---
4 files changed, 444 insertions(+), 41 deletions(-)
diff --git a/gcc/c-family/c-common.cc b/gcc/c-family/c-common.cc
index 587d76461e9..ed43fbc5856 100644
--- a/gcc/c-family/c-common.cc
+++ b/gcc/c-family/c-common.cc
@@ -8033,11 +8033,36 @@ resolve_overloaded_atomic_exchange (location_t loc, tree function,
/* Convert new value to required type, and dereference it.
If *p1 type can have padding or may involve floating point which
could e.g. be promoted to wider precision and demoted afterwards,
- state of padding bits might not be preserved. */
+ state of padding bits might not be preserved.
+
+ However, as a special case, we still want to preserve the padding
+ bits of _BitInt values if the ABI requires them to be extended in
+ memory. */
+
build_indirect_ref (loc, p1, RO_UNARY_STAR);
- p1 = build2_loc (loc, MEM_REF, I_type,
- build1 (VIEW_CONVERT_EXPR, I_type_ptr, p1),
- build_zero_cst (TREE_TYPE (p1)));
+
+ tree p1type = TREE_TYPE (p1);
+ bool bitint_extended_p = false;
+ if (TREE_CODE (TREE_TYPE (p1type)) == BITINT_TYPE)
+ {
+ struct bitint_info info;
+ unsigned prec = TYPE_PRECISION (TREE_TYPE (p1type));
+ targetm.c.bitint_type_info (prec, &info);
+ bitint_extended_p = info.extended;
+ }
+
+ if (bitint_extended_p)
+ p1 = build1_loc (loc, CONVERT_EXPR, I_type,
+ build2_loc (loc, MEM_REF, TREE_TYPE (p1type),
+ p1, build_zero_cst (p1type)));
+
+ /* Otherwise, the padding bits might not be preserved, as stated above. */
+ else
+ p1 = build2_loc (loc, MEM_REF, I_type,
+ build1 (VIEW_CONVERT_EXPR, I_type_ptr, p1),
+ build_zero_cst (p1type));
+
(*params)[1] = p1;
/* Move memory model to the 3rd position, and end param list. */
@@ -8117,11 +8142,35 @@ resolve_overloaded_atomic_compare_exchange (location_t loc, tree function,
/* Convert desired value to required type, and dereference it.
If *p2 type can have padding or may involve floating point which
could e.g. be promoted to wider precision and demoted afterwards,
- state of padding bits might not be preserved. */
+ state of padding bits might not be preserved.
+
+ However, as a special case, we still want to preserve the padding
+ bits of _BitInt values if the ABI requires them to be extended in
+ memory. */
+
build_indirect_ref (loc, p2, RO_UNARY_STAR);
- p2 = build2_loc (loc, MEM_REF, I_type,
- build1 (VIEW_CONVERT_EXPR, I_type_ptr, p2),
- build_zero_cst (TREE_TYPE (p2)));
+
+ tree p2type = TREE_TYPE (p2);
+ bool bitint_extended_p = false;
+ if (TREE_CODE (TREE_TYPE (p2type)) == BITINT_TYPE)
+ {
+ struct bitint_info info;
+ unsigned prec = TYPE_PRECISION (TREE_TYPE (p2type));
+ targetm.c.bitint_type_info (prec, &info);
+ bitint_extended_p = info.extended;
+ }
+
+ if (bitint_extended_p)
+ p2 = build1_loc (loc, CONVERT_EXPR, I_type,
+ build2_loc (loc, MEM_REF, TREE_TYPE (p2type),
+ p2, build_zero_cst (p2type)));
+
+ /* Otherwise, the padding bits might not be preserved, as stated above. */
+ else
+ p2 = build2_loc (loc, MEM_REF, I_type,
+ build1 (VIEW_CONVERT_EXPR, I_type_ptr, p2),
+ build_zero_cst (p2type));
+
(*params)[2] = p2;
/* The rest of the parameters are fine. NULL means no special return value
diff --git a/gcc/explow.cc b/gcc/explow.cc
index 7799a98053b..4d4dfd26c9d 100644
--- a/gcc/explow.cc
+++ b/gcc/explow.cc
@@ -852,11 +852,26 @@ promote_function_mode (const_tree type, machine_mode mode, int *punsignedp,
return mode;
}
+ /* Handle _BitInt(N) that does not require promotion. */
+ if (TREE_CODE (type) == BITINT_TYPE)
+ {
+ if (TYPE_MODE (type) == BLKmode)
+ return mode;
+
+ struct bitint_info info;
+ bool ok = targetm.c.bitint_type_info (TYPE_PRECISION (type), &info);
+ gcc_assert (ok);
+
+ if (!info.extended)
+ return mode;
+ }
+
switch (TREE_CODE (type))
{
case INTEGER_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE:
case REAL_TYPE: case OFFSET_TYPE: case FIXED_POINT_TYPE:
- case POINTER_TYPE: case REFERENCE_TYPE:
+ case POINTER_TYPE: case REFERENCE_TYPE: case BITINT_TYPE:
return targetm.calls.promote_function_mode (type, mode, punsignedp,
funtype,
for_return);
@@ -891,10 +906,25 @@ promote_mode (const_tree type ATTRIBUTE_UNUSED, machine_mode mode,
code = TREE_CODE (type);
unsignedp = *punsignedp;
+ /* Handle _BitInt(N) that does not require promotion. */
+ if (code == BITINT_TYPE)
+ {
+ if (TYPE_MODE (type) == BLKmode)
+ return mode;
+
+ struct bitint_info info;
+ bool ok = targetm.c.bitint_type_info (TYPE_PRECISION (type), &info);
+ gcc_assert (ok);
+
+ if (!info.extended)
+ return mode;
+ }
+
switch (code)
{
case INTEGER_TYPE: case ENUMERAL_TYPE: case BOOLEAN_TYPE:
case REAL_TYPE: case OFFSET_TYPE: case FIXED_POINT_TYPE:
+ case BITINT_TYPE:
/* Values of these types always have scalar mode. */
smode = as_a <scalar_mode> (mode);
PROMOTE_MODE (smode, unsignedp, type);
diff --git a/gcc/expr.cc b/gcc/expr.cc
index 3815c565e2d..c2dd53aedcc 100644
--- a/gcc/expr.cc
+++ b/gcc/expr.cc
@@ -11237,6 +11237,10 @@ expand_expr_real_1 (tree exp, rtx target, machine_mode tmode,
tree ssa_name = NULL_TREE;
gimple *g;
+ type = TREE_TYPE (exp);
+ mode = TYPE_MODE (type);
+ unsignedp = TYPE_UNSIGNED (type);
+
/* Some ABIs define padding bits in _BitInt uninitialized. Normally, RTL
expansion sign/zero extends integral types with less than mode precision
when reading from bit-fields and after arithmetic operations (see
@@ -11247,8 +11251,10 @@ expand_expr_real_1 (tree exp, rtx target, machine_mode tmode,
objects in memory, or function arguments, return value). Because we
internally extend after arithmetic operations, we can avoid doing that
when reading from SSA_NAMEs of vars. */
+
#define EXTEND_BITINT(expr) \
((TREE_CODE (type) == BITINT_TYPE \
+ && !bitint_type_info.extended \
&& reduce_bit_field \
&& mode != BLKmode \
&& modifier != EXPAND_MEMORY \
@@ -11257,9 +11263,13 @@ expand_expr_real_1 (tree exp, rtx target, machine_mode tmode,
&& modifier != EXPAND_CONST_ADDRESS) \
? reduce_to_bit_field_precision ((expr), NULL_RTX, type) : (expr))
- type = TREE_TYPE (exp);
- mode = TYPE_MODE (type);
- unsignedp = TYPE_UNSIGNED (type);
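+ /* EXTEND_BITINT reads this only behind its BITINT_TYPE check, so
+ it is always initialized before use. */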
+ struct bitint_info bitint_type_info;
+ if (TREE_CODE (type) == BITINT_TYPE)
+ {
+ bool ok = targetm.c.bitint_type_info (TYPE_PRECISION (type),
+ &bitint_type_info);
+ gcc_assert (ok);
+ }
treeop0 = treeop1 = treeop2 = NULL_TREE;
if (!VL_EXP_CLASS_P (exp))
@@ -11595,12 +11605,9 @@ expand_expr_real_1 (tree exp, rtx target, machine_mode tmode,
{
if (TREE_CODE (type) == BITINT_TYPE)
{
- unsigned int prec = TYPE_PRECISION (type);
- struct bitint_info info;
- bool ok = targetm.c.bitint_type_info (prec, &info);
- gcc_assert (ok);
scalar_int_mode limb_mode
- = as_a <scalar_int_mode> (info.limb_mode);
+ = as_a <scalar_int_mode> (bitint_type_info.limb_mode);
+ unsigned int prec = TYPE_PRECISION (type);
unsigned int limb_prec = GET_MODE_PRECISION (limb_mode);
if (prec > limb_prec && prec > MAX_FIXED_MODE_SIZE)
{
diff --git a/gcc/gimple-lower-bitint.cc b/gcc/gimple-lower-bitint.cc
index 6fefc834762..5240ff09e11 100644
--- a/gcc/gimple-lower-bitint.cc
+++ b/gcc/gimple-lower-bitint.cc
@@ -425,7 +425,7 @@ struct bitint_large_huge
~bitint_large_huge ();
void insert_before (gimple *);
- tree limb_access_type (tree, tree);
+ tree limb_access_type (tree, tree, bool = false);
tree limb_access (tree, tree, tree, bool);
tree build_bit_field_ref (tree, tree, unsigned HOST_WIDE_INT,
unsigned HOST_WIDE_INT);
@@ -438,6 +438,7 @@ struct bitint_large_huge
tree prepare_data_in_out (tree, tree, tree *, tree = NULL_TREE);
tree add_cast (tree, tree);
tree handle_plus_minus (tree_code, tree, tree, tree);
+ tree handle_plus_minus_1 (tree_code, tree, tree, tree);
tree handle_lshift (tree, tree, tree);
tree handle_cast (tree, tree, tree);
tree handle_bit_field_ref (tree, tree);
@@ -553,6 +554,7 @@ struct bitint_large_huge
unsigned m_upwards_2limb;
bool m_upwards;
bool m_cast_conditional;
+ bool m_force_extend;
unsigned m_bitfld_load;
vec<tree> m_data;
unsigned int m_data_cnt;
@@ -587,18 +589,34 @@ bitint_large_huge::insert_before (gimple *g)
significant limb if any. */
tree
-bitint_large_huge::limb_access_type (tree type, tree idx)
+bitint_large_huge::limb_access_type (tree type, tree idx,
+ bool force_extend)
{
if (type == NULL_TREE)
return m_limb_type;
+
+ /* FORCE_EXTEND or m_force_extend is set when the caller needs the
+ reduced-precision type of the partial (high) limb even on targets
+ whose ABI keeps _BitInt(N) extended. */
+ force_extend |= m_force_extend;
+
unsigned HOST_WIDE_INT i = tree_to_uhwi (idx);
unsigned int prec = TYPE_PRECISION (type);
gcc_assert (i * limb_prec < prec);
if ((i + 1) * limb_prec <= prec)
return m_limb_type;
else
- return build_nonstandard_integer_type (prec % limb_prec,
- TYPE_UNSIGNED (type));
+ {
+ struct bitint_info info;
+ bool ok = targetm.c.bitint_type_info (prec, &info);
+ gcc_assert (ok);
+
+ if (info.extended && !force_extend)
+ return m_limb_type;
+ else
+ return build_nonstandard_integer_type (prec % limb_prec,
+ TYPE_UNSIGNED (type));
+ }
}
/* Return a tree how to access limb IDX of VAR corresponding to BITINT_TYPE
@@ -1126,6 +1144,42 @@ bitint_large_huge::add_cast (tree type, tree val)
tree
bitint_large_huge::handle_plus_minus (tree_code code, tree rhs1, tree rhs2,
tree idx)
+{
+ /* Truncate the result if the target ABI requires it. */
+ struct bitint_info info;
+ tree op_type = TREE_TYPE (rhs1 == NULL_TREE ? rhs2 : rhs1);
+ bool ok = targetm.c.bitint_type_info (TYPE_PRECISION (op_type), &info);
+ gcc_assert (ok);
+
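+ /* Handle the operands with m_force_extend set so that any partial
+ high limb is produced in its reduced-precision type, just as on
+ targets that leave the padding bits undefined. */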
+ bool m_force_extend_save = m_force_extend;
+ if (info.extended)
+ m_force_extend = true;
+
+ rhs2 = handle_operand (rhs2, idx);
+ if (rhs1 == NULL_TREE)
+ rhs1 = build_zero_cst (TREE_TYPE (rhs2));
+ else
+ rhs1 = handle_operand (rhs1, idx);
+
+ if (info.extended)
+ m_force_extend = m_force_extend_save;
+
+ tree ret = handle_plus_minus_1 (code, rhs1, rhs2, idx);
+
+ if (info.extended && !m_force_extend
+ && !types_compatible_p (TREE_TYPE (ret), m_limb_type))
+ ret = add_cast (m_limb_type, ret);
+
+ return ret;
+}
+
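+/* Helper of handle_plus_minus and lower_addsub_overflow. Perform the
+ actual addition or subtraction on limb IDX with operands already
+ prepared by the caller, without any ABI-mandated truncation of the
+ result. */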
+tree
+bitint_large_huge::handle_plus_minus_1 (tree_code code, tree rhs1, tree rhs2,
+ tree idx)
{
tree lhs, data_out, ctype;
tree rhs1_type = TREE_TYPE (rhs1);
@@ -1234,9 +1288,27 @@ bitint_large_huge::handle_lshift (tree rhs1, tree rhs2, tree idx)
{
unsigned HOST_WIDE_INT cnt = tree_to_uhwi (rhs2);
gcc_checking_assert (cnt < (unsigned) limb_prec);
+
if (cnt == 0)
- return rhs1;
+ return handle_operand (rhs1, idx);
+ /* Truncate the result if the target ABI requires it. */
+ struct bitint_info info;
+ bool ok = targetm.c.bitint_type_info (TYPE_PRECISION (TREE_TYPE (rhs1)),
+ &info);
+ gcc_assert (ok);
+
+ if (info.extended)
+ {
+ bool m_force_extend_save = m_force_extend;
+ m_force_extend = true;
+ rhs1 = handle_operand (rhs1, idx);
+ m_force_extend = m_force_extend_save;
+ }
+ else
+ rhs1 = handle_operand (rhs1, idx);
+
tree lhs, data_out, rhs1_type = TREE_TYPE (rhs1);
gimple *g;
tree data_in = prepare_data_in_out (build_zero_cst (m_limb_type), idx,
@@ -1275,6 +1347,15 @@ bitint_large_huge::handle_lshift (tree rhs1, tree rhs2, tree idx)
}
else
lhs = data_in;
+
+ if (info.extended && !m_force_extend
+ && !types_compatible_p (rhs1_type, m_limb_type))
+ {
+ /* Restore the full limb type after the truncating cast so that
+ the result can be combined with limbs from other operands. */
+ lhs = add_cast (m_limb_type, lhs);
+ }
+
m_data[m_data_cnt] = data_out;
m_data_cnt += 2;
return lhs;
@@ -1286,6 +1367,10 @@ bitint_large_huge::handle_lshift (tree rhs1, tree rhs2, tree idx)
tree
bitint_large_huge::handle_cast (tree lhs_type, tree rhs1, tree idx)
{
+ struct bitint_info info;
+ bool ok = targetm.c.bitint_type_info (TYPE_PRECISION (lhs_type), &info);
+ gcc_assert (ok);
+
tree rhs_type = TREE_TYPE (rhs1);
gimple *g;
if ((TREE_CODE (rhs1) == SSA_NAME || TREE_CODE (rhs1) == INTEGER_CST)
@@ -1315,9 +1400,20 @@ bitint_large_huge::handle_cast (tree lhs_type, tree rhs1, tree idx)
rhs1 = handle_operand (rhs1, idx);
if (tree_fits_uhwi_p (idx))
{
- tree type = limb_access_type (lhs_type, idx);
+ tree type = limb_access_type (lhs_type, idx, true);
if (!types_compatible_p (type, TREE_TYPE (rhs1)))
rhs1 = add_cast (type, rhs1);
+
+ /* Restore the full limb type after the truncating cast so that
+ the result can be combined with limbs from other operands.
+
+ However, we want to avoid this if handle_cast is called during
+ handle_operand when m_force_extend is set, because it needs to
+ behave exactly the same as the non-"info.extended" case. */
+
+ if (info.extended && !m_force_extend
+ && !types_compatible_p (type, m_limb_type))
+ rhs1 = add_cast (m_limb_type, rhs1);
}
return rhs1;
}
@@ -1595,9 +1691,14 @@ bitint_large_huge::handle_cast (tree lhs_type, tree rhs1, tree idx)
else
t = m_data[save_data_cnt + 1];
}
- tree type = limb_access_type (lhs_type, idx);
+ tree type = limb_access_type (lhs_type, idx, true);
if (!useless_type_conversion_p (type, m_limb_type))
t = add_cast (type, t);
+
+ if (info.extended && !m_force_extend
+ && !types_compatible_p (type, m_limb_type))
+ t = add_cast (m_limb_type, t);
+
m_first = save_first;
return t;
}
@@ -1681,10 +1782,14 @@ bitint_large_huge::handle_cast (tree lhs_type, tree rhs1, tree idx)
t = m_data[m_data_cnt + 1];
else
{
- tree type = limb_access_type (lhs_type, idx);
+ tree type = limb_access_type (lhs_type, idx, true);
t = m_data[m_data_cnt + 2];
if (!useless_type_conversion_p (type, m_limb_type))
t = add_cast (type, t);
+
+ if (info.extended && !m_force_extend
+ && !types_compatible_p (type, m_limb_type))
+ t = add_cast (m_limb_type, t);
}
m_data_cnt += 3;
return t;
@@ -1697,7 +1802,7 @@ bitint_large_huge::handle_cast (tree lhs_type, tree rhs1, tree idx)
}
if (tree_fits_uhwi_p (idx))
{
- tree type = limb_access_type (lhs_type, idx);
+ tree type = limb_access_type (lhs_type, idx, true);
if (integer_zerop (idx))
t = m_data[m_data_cnt];
else if (TYPE_PRECISION (rhs_type) > limb_prec
@@ -1707,6 +1812,11 @@ bitint_large_huge::handle_cast (tree lhs_type, tree rhs1, tree idx)
t = m_data[m_data_cnt + 2];
if (!useless_type_conversion_p (type, m_limb_type))
t = add_cast (type, t);
+
+ if (info.extended && !m_force_extend
+ && !types_compatible_p (type, m_limb_type))
+ t = add_cast (m_limb_type, t);
+
m_data_cnt += 3;
return t;
}
@@ -1847,6 +1957,7 @@ bitint_large_huge::handle_load (gimple *stmt, tree idx)
{
tree rhs1 = gimple_assign_rhs1 (stmt);
tree rhs_type = TREE_TYPE (rhs1);
+ bool load_bitfield_p = false;
bool eh = stmt_ends_bb_p (stmt);
edge eh_edge = NULL;
gimple *g;
@@ -1870,11 +1981,17 @@ bitint_large_huge::handle_load (gimple *stmt, tree idx)
gcc_assert (tree_fits_uhwi_p (DECL_FIELD_BIT_OFFSET (fld)));
if (DECL_OFFSET_ALIGN (fld) >= TYPE_ALIGN (TREE_TYPE (rhs1))
&& (tree_to_uhwi (DECL_FIELD_BIT_OFFSET (fld)) % limb_prec) == 0)
- goto normal_load;
+ {
+ load_bitfield_p = true;
+ goto normal_load;
+ }
/* Even if DECL_FIELD_BIT_OFFSET (fld) is a multiple of UNITS_PER_BIT,
handle it normally for now. */
if ((tree_to_uhwi (DECL_FIELD_BIT_OFFSET (fld)) % BITS_PER_UNIT) == 0)
- goto normal_load;
+ {
+ load_bitfield_p = true;
+ goto normal_load;
+ }
tree repr = DECL_BIT_FIELD_REPRESENTATIVE (fld);
poly_int64 bitoffset;
poly_uint64 field_offset, repr_offset;
@@ -2069,9 +2186,15 @@ bitint_large_huge::handle_load (gimple *stmt, tree idx)
}
if (tree_fits_uhwi_p (idx))
{
- tree atype = limb_access_type (rhs_type, idx);
+ tree atype = limb_access_type (rhs_type, idx, true);
if (!useless_type_conversion_p (atype, TREE_TYPE (iv)))
iv = add_cast (atype, iv);
+
+ /* For ABI-extended _BitInts, convert back to the nominal limb type
+ after truncation so the value matches the other operands. */
+ tree ntype = limb_access_type (rhs_type, idx);
+ if (!types_compatible_p (ntype, atype))
+ iv = add_cast (ntype, iv);
}
m_data_cnt += 3;
return iv;
@@ -2085,6 +2208,20 @@ normal_load:
tree ret = make_ssa_name (TREE_TYPE (rhs1));
g = gimple_build_assign (ret, rhs1);
insert_before (g);
+
+ /* A "normal load" of a bitfield may bring over other bitfields
+ sharing the same representative at a higher offset.
+ Truncate them if required. */
+
+ bool truncate_p = false;
+ if (load_bitfield_p)
+ {
+ struct bitint_info info;
+ bool ok = targetm.c.bitint_type_info (TYPE_PRECISION (rhs_type), &info);
+ gcc_assert (ok);
+ truncate_p = info.extended;
+ }
+
if (eh)
{
maybe_duplicate_eh_stmt (g, stmt);
@@ -2094,13 +2231,24 @@ normal_load:
m_gsi = gsi_after_labels (e->dest);
add_eh_edge (e->src, eh_edge);
}
- if (tree_fits_uhwi_p (idx))
+ if (tree_fits_uhwi_p (idx) && !truncate_p)
{
tree atype = limb_access_type (rhs_type, idx);
if (!useless_type_conversion_p (atype, TREE_TYPE (rhs1)))
ret = add_cast (atype, ret);
}
}
+
+ if (truncate_p && tree_fits_uhwi_p (idx))
+ {
+ tree atype = limb_access_type (rhs_type, idx, true);
+ if (!useless_type_conversion_p (atype, TREE_TYPE (rhs1)))
+ {
+ ret = add_cast (atype, ret);
+ ret = add_cast (TREE_TYPE (rhs1), ret);
+ }
+ }
+
return ret;
}
@@ -2129,20 +2277,36 @@ bitint_large_huge::handle_stmt (gimple *stmt, tree idx)
g = gimple_build_assign (lhs, gimple_assign_rhs_code (stmt),
rhs1, rhs2);
insert_before (g);
+
+ /* For BIT_NOT_EXPR, truncate the partial limb if necessary. */
+ if (rhs2 == NULL_TREE
+ && tree_fits_uhwi_p (idx))
+ {
+ tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
+ tree atype = limb_access_type (rhs_type, idx, true);
+ struct bitint_info info;
+ bool ok = targetm.c.bitint_type_info (TYPE_PRECISION (rhs_type),
+ &info);
+ gcc_assert (ok);
+
+ if (info.extended
+ && !types_compatible_p (atype, TREE_TYPE (rhs1)))
+ {
+ lhs = add_cast (atype, lhs);
+ lhs = add_cast (TREE_TYPE (rhs1), lhs);
+ }
+ }
return lhs;
case PLUS_EXPR:
case MINUS_EXPR:
- rhs1 = handle_operand (gimple_assign_rhs1 (stmt), idx);
- rhs2 = handle_operand (gimple_assign_rhs2 (stmt), idx);
return handle_plus_minus (gimple_assign_rhs_code (stmt),
- rhs1, rhs2, idx);
+ gimple_assign_rhs1 (stmt),
+ gimple_assign_rhs2 (stmt), idx);
case NEGATE_EXPR:
- rhs2 = handle_operand (gimple_assign_rhs1 (stmt), idx);
- rhs1 = build_zero_cst (TREE_TYPE (rhs2));
- return handle_plus_minus (MINUS_EXPR, rhs1, rhs2, idx);
+ return handle_plus_minus (MINUS_EXPR, NULL_TREE,
+ gimple_assign_rhs1 (stmt), idx);
case LSHIFT_EXPR:
- return handle_lshift (handle_operand (gimple_assign_rhs1 (stmt),
- idx),
+ return handle_lshift (gimple_assign_rhs1 (stmt),
gimple_assign_rhs2 (stmt), idx);
case SSA_NAME:
case PAREN_EXPR:
@@ -2642,8 +2806,20 @@ bitint_large_huge::lower_mergeable_stmt (gimple *stmt, tree_code &cmp_code,
idx = size_int (end + (i > 2));
if (eq_p)
{
+ struct bitint_info info;
+ bool ok = targetm.c.bitint_type_info (prec, &info);
+ gcc_assert (ok);
+
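+ /* Handle both comparison operands with m_force_extend set so that
+ their partial high limbs are produced in the same reduced-precision
+ type. */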
+ bool m_force_extend_save = m_force_extend;
+ if (info.extended)
+ m_force_extend = true;
+
rhs1 = handle_operand (cmp_op1, idx);
tree rhs2 = handle_operand (cmp_op2, idx);
+
+ if (info.extended)
+ m_force_extend = m_force_extend_save;
+
g = gimple_build_cond (NE_EXPR, rhs1, rhs2, NULL_TREE, NULL_TREE);
insert_before (g);
edge e1 = split_block (gsi_bb (m_gsi), g);
@@ -2925,6 +3101,19 @@ bitint_large_huge::lower_mergeable_stmt (gimple *stmt, tree_code &cmp_code,
/* Otherwise, stores to any other lhs. */
if (!done)
{
+ struct bitint_info info;
+ bool ok = targetm.c.bitint_type_info (TYPE_PRECISION (type),
+ &info);
+ gcc_assert (ok);
+
+ /* Truncate the partial limb for both the large / huge cases. */
+ if (info.extended && i == cnt - 1 && prec % limb_prec != 0)
+ {
+ rhs1 = add_cast (limb_access_type (type, idx, true), rhs1);
+ rhs1 = add_cast (m_limb_type, rhs1);
+ }
+
tree l = limb_access (nlhs ? NULL_TREE : lhs_type,
nlhs ? nlhs : lhs, nidx, true);
g = gimple_build_assign (l, rhs1);
@@ -3497,6 +3686,30 @@ bitint_large_huge::lower_shift_stmt (tree obj, gimple *stmt)
insert_before (g);
t1 = gimple_assign_lhs (g);
}
+
+ /* Truncate the result if the target ABI requires it. */
+ struct bitint_info info;
+ bool ok = targetm.c.bitint_type_info (prec, &info);
+ gcc_assert (ok);
+
+ if (info.extended)
+ {
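+ /* Only the limb at index P is partial; truncate the value stored
+ there and leave full limbs unchanged. */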
+ tree hi_type = limb_access_type (TREE_TYPE (lhs), p, true);
+ t1 = build3 (COND_EXPR, TREE_TYPE (t1),
+ fold_build2 (EQ_EXPR, boolean_type_node, idx, p),
+ build1 (NOP_EXPR, TREE_TYPE (t1),
+ build1 (CONVERT_EXPR, hi_type, t1)),
+ t1);
+
+ t1 = force_gimple_operand_gsi (&m_gsi, t1, true, NULL_TREE, true,
+ GSI_SAME_STMT);
+
+ g = gimple_build_assign (make_ssa_name (TREE_TYPE (t1)), t1);
+ insert_before (g);
+
+ t1 = gimple_assign_lhs (g);
+ }
+
tree l = limb_access (TREE_TYPE (lhs), obj, idx, true);
g = gimple_build_assign (l, t1);
insert_before (g);
@@ -3529,6 +3742,25 @@ bitint_large_huge::lower_shift_stmt (tree obj, gimple *stmt)
LSHIFT_EXPR, t1, n1);
insert_before (g);
t1 = gimple_assign_lhs (g);
+
+ if (info.extended)
+ {
+ tree hi_type = limb_access_type (TREE_TYPE (lhs), p, true);
+ t1 = build3 (COND_EXPR, TREE_TYPE (t1),
+ fold_build2 (EQ_EXPR, boolean_type_node, idx, p),
+ build1 (NOP_EXPR, TREE_TYPE (t1),
+ build1 (CONVERT_EXPR, hi_type, t1)),
+ t1);
+
+ t1 = force_gimple_operand_gsi (&m_gsi, t1, true, NULL_TREE, true,
+ GSI_SAME_STMT);
+
+ g = gimple_build_assign (make_ssa_name (TREE_TYPE (t1)), t1);
+ insert_before (g);
+
+ t1 = gimple_assign_lhs (g);
+ }
+
l = limb_access (TREE_TYPE (lhs), obj, idx, true);
g = gimple_build_assign (l, t1);
insert_before (g);
@@ -3624,6 +3856,33 @@ bitint_large_huge::lower_muldiv_stmt (tree obj, gimple *stmt)
default:
gcc_unreachable ();
}
+
+ /* Truncate the result if necessary; libgcc does not do it for us. */
+ if (prec % limb_prec != 0)
+ {
+ struct bitint_info info;
+ bool ok = targetm.c.bitint_type_info (prec, &info);
+ gcc_assert (ok);
+
+ if (info.extended)
+ {
+ tree idx = size_int (prec / limb_prec);
+ tree src = limb_access (type, obj, idx, false);
+ tree dst = limb_access (type, obj, idx, true);
+ tree hi_type = limb_access_type (type, idx, true);
+
+ g = gimple_build_assign (make_ssa_name (m_limb_type), src);
+ insert_before (g);
+ src = gimple_assign_lhs (g);
+
+ src = add_cast (hi_type, src);
+ src = add_cast (m_limb_type, src);
+
+ g = gimple_build_assign (dst, src);
+ insert_before (g);
+ }
+ }
+
if (stmt_ends_bb_p (stmt))
{
maybe_duplicate_eh_stmt (g, stmt);
@@ -3655,7 +3914,8 @@ bitint_large_huge::lower_float_conv_stmt (tree obj, gimple *stmt)
gimple *g;
if (rhs_code == FIX_TRUNC_EXPR)
{
- int prec = TYPE_PRECISION (TREE_TYPE (lhs));
+ tree type = TREE_TYPE (lhs);
+ int prec = TYPE_PRECISION (type);
if (!TYPE_UNSIGNED (TREE_TYPE (lhs)))
prec = -prec;
if (obj == NULL_TREE)
@@ -3669,7 +3929,7 @@ bitint_large_huge::lower_float_conv_stmt (tree obj, gimple *stmt)
{
lhs = build_fold_addr_expr (obj);
lhs = force_gimple_operand_gsi (&m_gsi, lhs, true,
- NULL_TREE, true, GSI_SAME_STMT);
+ NULL_TREE, true, GSI_SAME_STMT);
}
scalar_mode from_mode
= as_a <scalar_mode> (TYPE_MODE (TREE_TYPE (rhs1)));
@@ -3690,6 +3950,34 @@ bitint_large_huge::lower_float_conv_stmt (tree obj, gimple *stmt)
lhs, build_int_cst (sitype, prec),
rhs1);
insert_before (g);
+
+ /* Truncate the result if necessary; libgcc does not do it for us. */
+ if (prec % limb_prec != 0)
+ {
+ prec = prec < 0 ? -prec : prec;
+
+ struct bitint_info info;
+ bool ok = targetm.c.bitint_type_info (prec, &info);
+ gcc_assert (ok);
+
+ if (info.extended)
+ {
+ tree idx = size_int (prec / limb_prec);
+ tree src = limb_access (type, obj, idx, false);
+ tree dst = limb_access (type, obj, idx, true);
+ tree hi_type = limb_access_type (type, idx, true);
+
+ g = gimple_build_assign (make_ssa_name (m_limb_type), src);
+ insert_before (g);
+ src = gimple_assign_lhs (g);
+
+ src = add_cast (hi_type, src);
+ src = add_cast (m_limb_type, src);
+
+ g = gimple_build_assign (dst, src);
+ insert_before (g);
+ }
+ }
}
else
{
@@ -3882,6 +4170,34 @@ bitint_large_huge::finish_arith_overflow (tree var, tree obj, tree type,
g = gimple_build_assign (v1, v2);
insert_before (g);
}
+
+ /* Truncate the result if necessary; libgcc does not do it for us. */
+ int prec = TYPE_PRECISION (type);
+ if (obj && prec % limb_prec != 0)
+ {
+ struct bitint_info info;
+ bool ok = targetm.c.bitint_type_info (prec, &info);
+ gcc_assert (ok);
+
+ if (info.extended)
+ {
+ tree idx = size_int (prec / limb_prec);
+ tree src = limb_access (type, obj, idx, false);
+ tree dst = limb_access (type, obj, idx, true);
+ tree hi_type = limb_access_type (type, idx, true);
+
+ g = gimple_build_assign (make_ssa_name (m_limb_type), src);
+ insert_before (g);
+ src = gimple_assign_lhs (g);
+
+ src = add_cast (hi_type, src);
+ src = add_cast (m_limb_type, src);
+
+ g = gimple_build_assign (dst, src);
+ insert_before (g);
+ }
+ }
+
if (orig_obj == NULL_TREE && obj)
{
ovf = add_cast (m_limb_type, ovf);
@@ -4234,7 +4550,7 @@ bitint_large_huge::lower_addsub_overflow (tree obj, gimple *stmt)
}
}
}
- tree rhs = handle_plus_minus (code, rhs1, rhs2, idx);
+ tree rhs = handle_plus_minus_1 (code, rhs1, rhs2, idx);
if (ovf != boolean_false_node)
{
if (tree_fits_uhwi_p (idx))
@@ -5416,6 +5732,7 @@ bitint_large_huge::lower_stmt (gimple *stmt)
m_upwards = false;
m_var_msb = false;
m_cast_conditional = false;
+ m_force_extend = false;
m_bitfld_load = 0;
m_loc = gimple_location (stmt);
if (is_gimple_call (stmt))
--
2.34.1