The following relaxes the assert in vectorizable_live_operation that
guards the currently unhandled cases so it also allows an intermediate
SSA copy, as happens in this PR.  It additionally demotes the assert
to a checking-only assert.
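To illustrate, the shape now tolerated when walking the scalar uses is
roughly the following (a sketch with made-up SSA names and a made-up
use `foo', not taken from the PR's dumps):

  _1 = ...;    /* scalar value computed in the loop, live after the exit */
  _2 = _1;     /* intermediate SSA copy: gimple_assign_rhs_code (use_stmt)
		  is SSA_NAME, which the old assert did not list */
  foo (_2);    /* scalar use the fallback keeps */

Note that gcc_checking_assert expands to a no-op when CHECKING_P is
zero, so a release compiler now silently takes the "using original
scalar computation" fallback even for codes not on the list.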
Bootstrapped and tested on x86_64-unknown-linux-gnu, pushed.

	PR tree-optimization/109573
	* tree-vect-loop.cc (vectorizable_live_operation): Allow
	unhandled SSA copy as well.  Demote assert to checking only.

	* g++.dg/vect/pr109573.cc: New testcase.
---
 gcc/testsuite/g++.dg/vect/pr109573.cc | 91 +++++++++++++++++++++++++++
 gcc/tree-vect-loop.cc                 |  7 ++-
 2 files changed, 95 insertions(+), 3 deletions(-)
 create mode 100644 gcc/testsuite/g++.dg/vect/pr109573.cc

diff --git a/gcc/testsuite/g++.dg/vect/pr109573.cc b/gcc/testsuite/g++.dg/vect/pr109573.cc
new file mode 100644
index 00000000000..d96f86f9579
--- /dev/null
+++ b/gcc/testsuite/g++.dg/vect/pr109573.cc
@@ -0,0 +1,91 @@
+// { dg-do compile }
+// { dg-require-effective-target c++20 }
+
+void *operator new(__SIZE_TYPE__, void *__p) { return __p; }
+template <typename _Head> struct _Head_base {
+  _Head _M_head_impl;
+};
+template <unsigned long, typename...> struct _Tuple_impl;
+template <unsigned long _Idx, typename _Head, typename... _Tail>
+struct _Tuple_impl<_Idx, _Head, _Tail...> : _Tuple_impl<_Idx + 1, _Tail...>,
+                                            _Head_base<_Head> {
+  template <typename _UHead, typename... _UTail>
+  _Tuple_impl(_UHead __head, _UTail... __tail)
+      : _Tuple_impl<_Idx + 1, _Tail...>(__tail...), _Head_base<_Head>(__head) {}
+};
+template <unsigned long _Idx, typename _Head> struct _Tuple_impl<_Idx, _Head> {
+  template <typename _UHead> _Tuple_impl(_UHead);
+};
+template <typename... _Elements> struct tuple : _Tuple_impl<0, _Elements...> {
+  template <typename... _UElements>
+  tuple(_UElements... __elements)
+      : _Tuple_impl<0, _Elements...>(__elements...) {}
+};
+unsigned long position_;
+struct Zone {
+  template <typename T, typename... Args> T *New(Args... args) {
+    return new (reinterpret_cast<void *>(position_)) T(args...);
+  }
+};
+struct Label {
+  int pos_;
+  int near_link_pos_;
+};
+enum Condition { below_equal };
+void bind(Label *);
+Zone *zone();
+unsigned long deopt_info_address();
+int MakeDeferredCode___trans_tmp_2, MakeDeferredCode___trans_tmp_3,
+    Prologue___trans_tmp_6, MakeDeferredCode___trans_tmp_1;
+struct MaglevAssembler {
+  template <typename Function, typename... Args>
+  void MakeDeferredCode(Function &&, Args &&...);
+  template <typename Function, typename... Args>
+  void JumpToDeferredIf(Condition, Function, Args... args) {
+    MakeDeferredCode(Function(), args...);
+  }
+  void Prologue();
+};
+struct ZoneLabelRef {
+  ZoneLabelRef(Zone *zone) : label_(zone->New<Label>()) {}
+  ZoneLabelRef(MaglevAssembler *) : ZoneLabelRef(zone()) {}
+  Label *operator*() { return label_; }
+  Label *label_;
+};
+template <typename Function>
+struct FunctionArgumentsTupleHelper
+    : FunctionArgumentsTupleHelper<decltype(&Function::operator())> {};
+template <typename C, typename R, typename... A>
+struct FunctionArgumentsTupleHelper<R (C::*)(A...) const> {
+  using Tuple = tuple<A...>;
+};
+template <typename> struct StripFirstTupleArg;
+template <typename T1, typename... T>
+struct StripFirstTupleArg<tuple<T1, T...>> {
+  using Stripped = tuple<T...>;
+};
+template <typename Function> struct DeferredCodeInfoImpl {
+  template <typename... InArgs>
+  DeferredCodeInfoImpl(int *, int, int, Function, InArgs... args)
+      : args(args...) {}
+  StripFirstTupleArg<
+      typename FunctionArgumentsTupleHelper<Function>::Tuple>::Stripped args;
+};
+template <typename Function, typename... Args>
+void MaglevAssembler::MakeDeferredCode(Function &&deferred_code_gen,
+                                       Args &&...args) {
+  zone()->New<DeferredCodeInfoImpl<Function>>(
+      &MakeDeferredCode___trans_tmp_1, MakeDeferredCode___trans_tmp_2,
+      MakeDeferredCode___trans_tmp_3, deferred_code_gen, args...);
+}
+void MaglevAssembler::Prologue() {
+  int *__trans_tmp_9;
+  ZoneLabelRef deferred_call_stack_guard_return(this);
+  __trans_tmp_9 = reinterpret_cast<int *>(deopt_info_address());
+  JumpToDeferredIf(
+      below_equal, [](MaglevAssembler, int *, ZoneLabelRef, int, int) {},
+      __trans_tmp_9, deferred_call_stack_guard_return, Prologue___trans_tmp_6,
+      0);
+  Label __trans_tmp_7 = **deferred_call_stack_guard_return;
+  bind(&__trans_tmp_7);
+}
diff --git a/gcc/tree-vect-loop.cc b/gcc/tree-vect-loop.cc
index ba28214f09a..6ea0f21fd13 100644
--- a/gcc/tree-vect-loop.cc
+++ b/gcc/tree-vect-loop.cc
@@ -10114,9 +10114,10 @@ vectorizable_live_operation (vec_info *vinfo,
 					       use_stmt))
 	    {
 	      enum tree_code code = gimple_assign_rhs_code (use_stmt);
-	      gcc_assert (code == CONSTRUCTOR
-			  || code == VIEW_CONVERT_EXPR
-			  || CONVERT_EXPR_CODE_P (code));
+	      gcc_checking_assert (code == SSA_NAME
+				   || code == CONSTRUCTOR
+				   || code == VIEW_CONVERT_EXPR
+				   || CONVERT_EXPR_CODE_P (code));
 	      if (dump_enabled_p ())
 		dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
 				 "Using original scalar computation for "
-- 
2.35.3