http://gcc.gnu.org/bugzilla/show_bug.cgi?id=45381
--- Comment #12 from Iain Sandoe <iains at gcc dot gnu.org> 2011-02-08 21:25:05 UTC --- (In reply to comment #11) > Yes, a patch like in #1 would be fine. I will produce one in due course (unless Dominique beats me to it...) -- the hack below is purely to satisfy curiosity :-) -- I doubt it is worth unravelling how to make it work in reality.. The hack produces a working stage1 compiler built from gcc-4.0.1 or gcc-4.2.1. It seems that gcc-4.0.1 does not recognize __attribute__((altivec(vector))) and altivec.h indeed does not define the functions it complains about... --- Index: libcpp/lex.c =================================================================== --- libcpp/lex.c (revision 169914) +++ libcpp/lex.c (working copy) @@ -21,6 +21,7 @@ along with this program; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #include "config.h" +#include <altivec.h> #include "system.h" #include "cpplib.h" #include "internal.h" @@ -519,10 +520,11 @@ init_vectorized_lexer (void) so we can't compile this function without -maltivec on the command line (or implied by some other switch). */ + static const uchar * search_line_fast (const uchar *s, const uchar *end ATTRIBUTE_UNUSED) { - typedef __attribute__((altivec(vector))) unsigned char vc; + typedef __vector unsigned char vc ; const vc repl_nl = { '\n', '\n', '\n', '\n', '\n', '\n', '\n', '\n', @@ -550,14 +552,14 @@ search_line_fast (const uchar *s, const uchar *end /* Altivec loads automatically mask addresses with -16. This lets us issue the first load as early as possible. */ - data = __builtin_vec_ld(0, (const vc *)s); + data = vec_ld(0, (const vc *)s); /* Discard bytes before the beginning of the buffer. Do this by beginning with all ones and shifting in zeros according to the mis-alignment. The LVSR instruction pulls the exact shift we want from the address. 
*/ - mask = __builtin_vec_lvsr(0, s); - mask = __builtin_vec_perm(zero, ones, mask); + mask = vec_lvsr(0, s); + mask = vec_perm(zero, ones, mask); data &= mask; /* While altivec loads mask addresses, we still need to align S so @@ -571,20 +573,20 @@ search_line_fast (const uchar *s, const uchar *end vc m_nl, m_cr, m_bs, m_qm; s += 16; - data = __builtin_vec_ld(0, (const vc *)s); + data = vec_ld(0, (const vc *)s); start: - m_nl = (vc) __builtin_vec_cmpeq(data, repl_nl); - m_cr = (vc) __builtin_vec_cmpeq(data, repl_cr); - m_bs = (vc) __builtin_vec_cmpeq(data, repl_bs); - m_qm = (vc) __builtin_vec_cmpeq(data, repl_qm); + m_nl = (vc) vec_cmpeq(data, repl_nl); + m_cr = (vc) vec_cmpeq(data, repl_cr); + m_bs = (vc) vec_cmpeq(data, repl_bs); + m_qm = (vc) vec_cmpeq(data, repl_qm); t = (m_nl | m_cr) | (m_bs | m_qm); /* T now contains 0xff in bytes for which we matched one of the relevant characters. We want to exit the loop if any byte in T is non-zero. Below is the expansion of vec_any_ne(t, zero). */ } - while (!__builtin_vec_vcmpeq_p(/*__CR6_LT_REV*/3, t, zero)); + while (!vec_any_ne(t, zero)); { #define N (sizeof(vc) / sizeof(long))