https://gcc.gnu.org/bugzilla/show_bug.cgi?id=98172

--- Comment #10 from Hongtao.liu <crazylht at gmail dot com> ---
(In reply to Hongtao.liu from comment #9)
> > .L3:
> >     vmovupd (%rcx,%rax), %xmm3
> >     vmovupd (%rsi,%rax), %xmm4
> >     vinsertf128     $0x1, 16(%rcx,%rax), %ymm3, %ymm0
> >     vinsertf128     $0x1, 16(%rsi,%rax), %ymm4, %ymm2
> >     vmovupd (%rdi,%rax), %xmm5
> >     vinsertf128     $0x1, 16(%rdi,%rax), %ymm5, %ymm1
> >     vfmadd132pd     %ymm2, %ymm1, %ymm0
> >     vmovupd %xmm0, (%rdx,%rax)
> >     vextractf128    $0x1, %ymm0, 16(%rdx,%rax)
> >     addq    $32, %rax
> >     cmpq    $2048, %rax
> >     jne     .L3
> >     vzeroupper
> >     ret
> 
> The kernel loop could be better as
>  
> .L3:
>       vmovupd (%rcx,%rax), %ymm0
>       vmovupd (%rdi,%rax), %ymm1
>       vfmadd132pd     (%rsi,%rax), %ymm1, %ymm0
>       vmovupd %ymm0, (%rdx,%rax)
>       addq    $32, %rax
>       cmpq    $2048, %rax
>       jne     .L3

The access goes through the movmisalign<mode> pattern and is finally split into
halves by ix86_avx256_split_vector_move_misalign.  The difference between
-mtune=generic and -mtune=haswell that matters here is
X86_TUNE_AVX256_UNALIGNED_LOAD_OPTIMAL and
X86_TUNE_AVX256_UNALIGNED_STORE_OPTIMAL:

-------
/* X86_TUNE_AVX256_UNALIGNED_LOAD_OPTIMAL: if false, unaligned loads are
   split.  */
DEF_TUNE (X86_TUNE_AVX256_UNALIGNED_LOAD_OPTIMAL, "256_unaligned_load_optimal",
          ~(m_NEHALEM | m_SANDYBRIDGE | m_GENERIC))

/* X86_TUNE_AVX256_UNALIGNED_STORE_OPTIMAL: if false, unaligned stores are
   split.  */
DEF_TUNE (X86_TUNE_AVX256_UNALIGNED_STORE_OPTIMAL,
          "256_unaligned_store_optimal",
          ~(m_NEHALEM | m_SANDYBRIDGE | m_BDVER | m_ZNVER1 | m_GENERIC))
--------
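
If this behaviour were to become the default for generic tuning, rather than
being requested per compilation with -mtune-ctrl, a sketch of the change (not
a committed patch) in gcc/config/i386/x86-tune.def would be to drop m_GENERIC
from the excluded sets:

-------
/* X86_TUNE_AVX256_UNALIGNED_LOAD_OPTIMAL: if false, unaligned loads are
   split.  */
DEF_TUNE (X86_TUNE_AVX256_UNALIGNED_LOAD_OPTIMAL, "256_unaligned_load_optimal",
          ~(m_NEHALEM | m_SANDYBRIDGE))

/* X86_TUNE_AVX256_UNALIGNED_STORE_OPTIMAL: if false, unaligned stores are
   split.  */
DEF_TUNE (X86_TUNE_AVX256_UNALIGNED_STORE_OPTIMAL,
          "256_unaligned_store_optimal",
          ~(m_NEHALEM | m_SANDYBRIDGE | m_BDVER | m_ZNVER1))
--------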

Manually enabling the two tunes on top of generic with

gcc -S -O3 y.c -mavx2 -mfma
-mtune-ctrl="256_unaligned_load_optimal,256_unaligned_store_optimal"

successfully generates the optimal code:

.L3:
        vmovupd (%rcx,%rax), %ymm0
        vmovupd (%rdi,%rax), %ymm1
        vfmadd132pd     (%rsi,%rax), %ymm1, %ymm0
        vmovupd %ymm0, (%rdx,%rax)
        addq    $32, %rax
        cmpq    $2048, %rax
        jne     .L3
        vzeroupper
        ret
.L5:
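
For reference, a test case along the following lines (hypothetical; the actual
y.c is not attached to this comment) reproduces the kernel when compiled with
the command above:

/* Hypothetical reconstruction of y.c: a simple fused multiply-add loop
   over 256 doubles (2048 bytes, matching the cmpq $2048 loop bound).  */
double a[256], b[256], c[256], d[256];

void
foo (void)
{
  for (int i = 0; i != 256; i++)
    d[i] = a[i] * b[i] + c[i];
}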
