<div dir="ltr"><div class="gmail_default" style="font-family:'trebuchet ms',sans-serif;font-size:small">Ok. I will fix the issue and re-submit these patches.</div></div><div class="gmail_extra"><br><div class="gmail_quote">On Sat, Apr 18, 2015 at 12:16 AM, Steve Borho <span dir="ltr"><<a href="mailto:steve@borho.org" target="_blank">steve@borho.org</a>></span> wrote:<br><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex"><span class="">On 04/16, <a href="mailto:dnyaneshwar@multicorewareinc.com">dnyaneshwar@multicorewareinc.com</a> wrote:<br>
> # HG changeset patch
> # User Dnyaneshwar G <dnyaneshwar@multicorewareinc.com>
> # Date 1429164512 -19800
> # Thu Apr 16 11:38:32 2015 +0530
> # Node ID 507135d8bcdcb496783c49b4b0304b961a68c253
> # Parent f9c0e1f233cc15ccce4eb96adef11583af082f33
> asm: new optimized algorithm for satd, improved ~30% over previous algorithm

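For anyone skimming: satd here is the sum of absolute Hadamard-transformed
differences between the original and predicted blocks. As a point of
reference only (a scalar sketch, not the x265 implementation; the final
normalization shift is omitted), the 4x4 primitive amounts to:

    /* Scalar satd for one 4x4 block: 4-point Hadamard on rows, then
     * columns, then the sum of absolute coefficients. */
    #include <stdint.h>
    #include <stdlib.h>

    static int satd_4x4_ref(const uint8_t *pix1, intptr_t stride1,
                            const uint8_t *pix2, intptr_t stride2)
    {
        int d[4][4], sum = 0;

        for (int i = 0; i < 4; i++)
            for (int j = 0; j < 4; j++)
                d[i][j] = pix1[i * stride1 + j] - pix2[i * stride2 + j];

        for (int i = 0; i < 4; i++) {   /* horizontal butterflies */
            int a0 = d[i][0] + d[i][1], a1 = d[i][0] - d[i][1];
            int a2 = d[i][2] + d[i][3], a3 = d[i][2] - d[i][3];
            d[i][0] = a0 + a2; d[i][2] = a0 - a2;
            d[i][1] = a1 + a3; d[i][3] = a1 - a3;
        }
        for (int j = 0; j < 4; j++) {   /* vertical butterflies + abs */
            int a0 = d[0][j] + d[1][j], a1 = d[0][j] - d[1][j];
            int a2 = d[2][j] + d[3][j], a3 = d[2][j] - d[3][j];
            sum += abs(a0 + a2) + abs(a0 - a2)
                 + abs(a1 + a3) + abs(a1 - a3);
        }
        return sum;
    }

The kernels below process larger blocks as 16x8 tiles with the same
structure, just vectorized.
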
This was pushed, but I'm seeing intermittent test failures:

steve@zeppelin> ./test/TestBench
Using random seed 553152D3 8bpp
Testing primitives: SSE2
Testing primitives: SSE3
Testing primitives: SSSE3
Testing primitives: SSE4
Testing primitives: AVX
Testing primitives: AVX2
chroma_satd[i420][32x32] failed!

x265: asm primitive has failed. Go and fix that Right Now!

This commit will be backed out. The later satd_16xN patch needs this
one, so it has been de-queued as well. Both will have to be
re-submitted after this is fixed.
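
One data point that may help the debugging; treat it as my reading of
the new kernel, not a verified diagnosis. The rewrite keeps the
per-lane partial sums in 16-bit words (the paddw m6 in calc_satd_16x8)
and defers widening to the pmaddwd against pw_1 in the epilogues, and
pmaddwd treats those words as signed. Worst case, each pabsw result
can reach 255 * 2 * 4 = 2040, and each calc_satd_16x8 call does four
paddw into m6, so any block that routes more than four calls through
one accumulator can cross 32767 on adversarial input; 32x32 routes
eight, which would explain a failure that only shows up on some random
seeds:

    /* Worst-case growth of one 16-bit lane of the m6 accumulator for
     * 8-bit inputs: pmaddubsw with +/-1 weights keeps each word within
     * +/-255, the org/ref subtraction doubles that, two butterfly
     * stages double it twice more (|lane| <= 2040 after pabsw), and
     * each calc_satd_16x8 call adds four such values per lane. */
    #include <stdio.h>

    int main(void)
    {
        const int per_call = 4 * 2040;   /* max added per 16x8 call */

        for (int calls = 1; calls <= 8; calls++)
            printf("%d call(s): max lane sum %5d  %s\n",
                   calls, calls * per_call,
                   calls * per_call > 32767 ? "can wrap signed 16-bit"
                                            : "safe");
        return 0;
    }

Note that even the m8 split in pixel_satd_32x64 still leaves eight
calls per accumulator, so the same worst-case bound applies there.
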
<div><div class="h5"><br>
> diff -r f9c0e1f233cc -r 507135d8bcdc source/common/x86/pixel-a.asm
> --- a/source/common/x86/pixel-a.asm Wed Apr 15 16:20:27 2015 +0530
> +++ b/source/common/x86/pixel-a.asm Thu Apr 16 11:38:32 2015 +0530
> @@ -10519,156 +10519,102 @@
> %if ARCH_X86_64 == 1
> INIT_YMM avx2
> cglobal calc_satd_16x8 ; function to compute satd cost for 16 columns, 8 rows
> - movu xm4, [r0]
> - movu xm5, [r0 + r1]
> - movu xm0, [r2]
> - movu xm1, [r2 + r3]
> -
> - vpermq m4, m4, 01010000b
> - vpermq m5, m5, 01010000b
> - vpermq m0, m0, 01010000b
> - vpermq m1, m1, 01010000b
> -
> - pmaddubsw m4, m7
> - pmaddubsw m0, m7
> - pmaddubsw m5, m7
> - pmaddubsw m1, m7
> - psubw m0, m4
> - psubw m1, m5
> -
> - movu xm4, [r0 + r1 * 2]
> - movu xm5, [r0 + r4]
> - movu xm2, [r2 + r3 * 2]
> - movu xm3, [r2 + r5]
> -
> - vpermq m4, m4, 01010000b
> - vpermq m5, m5, 01010000b
> - vpermq m2, m2, 01010000b
> - vpermq m3, m3, 01010000b
> -
> - pmaddubsw m4, m7
> - pmaddubsw m2, m7
> - pmaddubsw m5, m7
> - pmaddubsw m3, m7
> - psubw m2, m4
> - psubw m3, m5
> -
> - lea r0, [r0 + r1 * 4]
> - lea r2, [r2 + r3 * 4]
> -
> - paddw m4, m0, m1
> - psubw m1, m1, m0
> - paddw m0, m2, m3
> - psubw m3, m3, m2
> - paddw m2, m4, m0
> - psubw m0, m0, m4
> - paddw m4, m1, m3
> - psubw m3, m3, m1
> - pabsw m2, m2
> - pabsw m0, m0
> - pabsw m4, m4
> - pabsw m3, m3
> - pblendw m1, m2, m0, 10101010b
> - pslld m0, m0, 16
> - psrld m2, m2, 16
> - por m0, m0, m2
> - pmaxsw m1, m1, m0
> - pxor m9, m9, m9
> - mova m8, m1
> - punpcklwd m8, m8, m9
> - paddd m6, m6, m8
> - mova m8, m1
> - punpckhwd m8, m8, m9
> - paddd m6, m6, m8
> - pblendw m2, m4, m3, 10101010b
> - pslld m3, m3, 16
> - psrld m4, m4, 16
> - por m3, m3, m4
> - pmaxsw m2, m2, m3
> - pxor m9, m9, m9
> - mova m8, m2
> - punpcklwd m8, m8, m9
> - paddd m6, m6, m8
> - mova m8, m2
> - punpckhwd m8, m8, m9
> - paddd m6, m6, m8
> -
> - movu xm4, [r0]
> - movu xm5, [r0 + r1]
> - movu xm1, [r2]
> - movu xm2, [r2 + r3]
> -
> - vpermq m4, m4, 01010000b
> - vpermq m5, m5, 01010000b
> - vpermq m1, m1, 01010000b
> - vpermq m2, m2, 01010000b
> -
> - pmaddubsw m4, m4, m7
> - pmaddubsw m1, m1, m7
> - pmaddubsw m5, m5, m7
> - pmaddubsw m2, m2, m7
> - psubw m1, m1, m4
> - psubw m2, m2, m5
> -
> - movu xm4, [r0 + r1 * 2]
> - movu xm5, [r0 + r4]
> - movu xm0, [r2 + r3 * 2]
> - movu xm3, [r2 + r5]
> -
> - vpermq m4, m4, 01010000b
> - vpermq m5, m5, 01010000b
> - vpermq m0, m0, 01010000b
> - vpermq m3, m3, 01010000b
> -
> - lea r0, [r0 + r1 * 4]
> - lea r2, [r2 + r3 * 4]
> -
> - pmaddubsw m4, m4, m7
> - pmaddubsw m0, m0, m7
> - pmaddubsw m5, m5, m7
> - pmaddubsw m3, m3, m7
> - psubw m0, m0, m4
> - psubw m3, m3, m5
> - paddw m4, m1, m2
> - psubw m2, m2, m1
> - paddw m1, m0, m3
> - psubw m3, m3, m0
> - paddw m0, m4, m1
> - psubw m1, m1, m4
> - paddw m4, m2, m3
> - psubw m3, m3, m2
> - pabsw m0, m0
> - pabsw m1, m1
> - pabsw m4, m4
> - pabsw m3, m3
> - pblendw m2, m0, m1, 10101010b
> - pslld m1, m1, 16
> - psrld m0, m0, 16
> - por m1, m1, m0
> - pmaxsw m2, m2, m1
> - pxor m9, m9, m9
> - mova m8, m2
> - punpcklwd m8, m8, m9
> - paddd m6, m6, m8
> - mova m8, m2
> - punpckhwd m8, m8, m9
> - paddd m6, m6, m8
> - pblendw m0, m4, m3, 10101010b
> - pslld m3, m3, 16
> - psrld m4, m4, 16
> - por m3, m3, m4
> - pmaxsw m0, m0, m3
> - pxor m9, m9, m9
> - mova m8, m0
> - punpcklwd m8, m8, m9
> - paddd m6, m6, m8
> - mova m8, m0
> - punpckhwd m8, m8, m9
> - paddd m6, m6, m8
> + vbroadcasti128 m0, [r0]
> + vbroadcasti128 m4, [r2]
> + vbroadcasti128 m1, [r0 + r1]
> + vbroadcasti128 m5, [r2 + r3]
> + pmaddubsw m4, m7
> + pmaddubsw m0, m7
> + pmaddubsw m5, m7
> + pmaddubsw m1, m7
> + psubw m0, m4
> + psubw m1, m5
> + vbroadcasti128 m2, [r0 + r1 * 2]
> + vbroadcasti128 m4, [r2 + r3 * 2]
> + vbroadcasti128 m3, [r0 + r4]
> + vbroadcasti128 m5, [r2 + r5]
> + pmaddubsw m4, m7
> + pmaddubsw m2, m7
> + pmaddubsw m5, m7
> + pmaddubsw m3, m7
> + psubw m2, m4
> + psubw m3, m5
> + lea r0, [r0 + r1 * 4]
> + lea r2, [r2 + r3 * 4]
> + paddw m4, m0, m1
> + psubw m1, m1, m0
> + paddw m0, m2, m3
> + psubw m3, m2
> + paddw m2, m4, m0
> + psubw m0, m4
> + paddw m4, m1, m3
> + psubw m3, m1
> + pabsw m2, m2
> + pabsw m0, m0
> + pabsw m4, m4
> + pabsw m3, m3
> + pblendw m1, m2, m0, 10101010b
> + pslld m0, 16
> + psrld m2, 16
> + por m0, m2
> + pmaxsw m1, m0
> + paddw m6, m1
> + pblendw m2, m4, m3, 10101010b
> + pslld m3, 16
> + psrld m4, 16
> + por m3, m4
> + pmaxsw m2, m3
> + paddw m6, m2
> + vbroadcasti128 m1, [r0]
> + vbroadcasti128 m4, [r2]
> + vbroadcasti128 m2, [r0 + r1]
> + vbroadcasti128 m5, [r2 + r3]
> + pmaddubsw m4, m7
> + pmaddubsw m1, m7
> + pmaddubsw m5, m7
> + pmaddubsw m2, m7
> + psubw m1, m4
> + psubw m2, m5
> + vbroadcasti128 m0, [r0 + r1 * 2]
> + vbroadcasti128 m4, [r2 + r3 * 2]
> + vbroadcasti128 m3, [r0 + r4]
> + vbroadcasti128 m5, [r2 + r5]
> + lea r0, [r0 + r1 * 4]
> + lea r2, [r2 + r3 * 4]
> + pmaddubsw m4, m7
> + pmaddubsw m0, m7
> + pmaddubsw m5, m7
> + pmaddubsw m3, m7
> + psubw m0, m4
> + psubw m3, m5
> + paddw m4, m1, m2
> + psubw m2, m1
> + paddw m1, m0, m3
> + psubw m3, m0
> + paddw m0, m4, m1
> + psubw m1, m4
> + paddw m4, m2, m3
> + psubw m3, m2
> + pabsw m0, m0
> + pabsw m1, m1
> + pabsw m4, m4
> + pabsw m3, m3
> + pblendw m2, m0, m1, 10101010b
> + pslld m1, 16
> + psrld m0, 16
> + por m1, m0
> + pmaxsw m2, m1
> + paddw m6, m2
> + pblendw m0, m4, m3, 10101010b
> + pslld m3, 16
> + psrld m4, 16
> + por m3, m4
> + pmaxsw m0, m3
> + paddw m6, m0
> ret
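
If my reading is right, the speed-up comes from two things visible in
this hunk: each movu + vpermq pair becomes a single vbroadcasti128
load, and switching the multiplier from hmul_8p to hmul_16p (one
128-bit lane of all ones, one of alternating +/-1, assuming the usual
constant-table layout) lets a single pmaddubsw produce both halves of
the first butterfly, while the per-call punpcklwd/punpckhwd/paddd
widening is replaced by plain paddw. In scalar terms, the
load/multiply trick is roughly:

    /* Model of vbroadcasti128 + pmaddubsw with hmul_16p: the same 16
     * source bytes sit in both 128-bit lanes; multiplying one lane by
     * all ones and the other by {1,-1,...} yields the pair sums and
     * pair differences of the first Hadamard stage in one shot. */
    #include <stdint.h>

    static void first_stage(const uint8_t px[16],
                            int16_t sums[8], int16_t diffs[8])
    {
        for (int i = 0; i < 8; i++) {
            sums[i]  = (int16_t)(px[2*i] + px[2*i + 1]); /* low lane  */
            diffs[i] = (int16_t)(px[2*i] - px[2*i + 1]); /* high lane */
        }
    }

That deferred widening is also exactly where the 16-bit headroom
concern above comes from.
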
>
> -cglobal pixel_satd_32x8, 4,8,10 ; if WIN64 && cpuflag(avx2)
> - mova m7, [hmul_8p]
> +cglobal pixel_satd_32x8, 4,8,8 ; if WIN64 && cpuflag(avx2)
> + mova m7, [hmul_16p]
> lea r4, [3 * r1]
> lea r5, [3 * r3]
> pxor m6, m6
> @@ -10682,17 +10628,18 @@
>
> call calc_satd_16x8
>
> - vextracti128 xm8, m6, 1
> - paddd xm6, xm8
> - movhlps xm7, xm6
> - paddd xm6, xm7
> - pshufd xm7, xm6, 1
> - paddd xm6, xm7
> - movd eax, xm6
> - RET
> -
> -cglobal pixel_satd_32x16, 4,8,10 ; if WIN64 && cpuflag(avx2)
> - mova m7, [hmul_8p]
> + vextracti128 xm0, m6, 1
> + paddw xm0, xm6
> + pmaddwd xm0, [pw_1]
> + movhlps xm7, xm0
> + paddd xm0, xm7
> + pshuflw xm7, xm0, q0032
> + paddd xm0, xm7
> + movd eax, xm0
> + RET
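
The reduction epilogue changes accordingly: the old code had dword
sums and only needed to fold them, while the new one first folds the
two 128-bit halves with paddw, widens the word lanes against pw_1, and
then folds dwords down to eax. A scalar model of the new sequence
(lane ordering simplified):

    /* vextracti128 + paddw folds the two halves, pmaddwd against a
     * vector of ones widens adjacent word lanes to dwords -- as
     * signed words, which is where the overflow hazard bites -- and
     * the remaining paddd/shuffles reduce to a single scalar. */
    #include <stdint.h>

    static uint32_t reduce_m6(const int16_t lanes[16])
    {
        int16_t w[8];
        int32_t sum = 0;

        for (int i = 0; i < 8; i++)       /* vextracti128 + paddw  */
            w[i] = (int16_t)(lanes[i] + lanes[i + 8]);
        for (int i = 0; i < 4; i++)       /* pmaddwd xm0, [pw_1],  */
            sum += (int32_t)w[2*i] + w[2*i + 1];  /* then paddd fold */
        return (uint32_t)sum;
    }
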
> +
> +cglobal pixel_satd_32x16, 4,8,8 ; if WIN64 && cpuflag(avx2)
> + mova m7, [hmul_16p]
> lea r4, [3 * r1]
> lea r5, [3 * r3]
> pxor m6, m6
> @@ -10701,22 +10648,25 @@
>
> call calc_satd_16x8
> call calc_satd_16x8
> +
> lea r0, [r6 + 16]
> lea r2, [r7 + 16]
> +
> call calc_satd_16x8
> call calc_satd_16x8
>
> - vextracti128 xm8, m6, 1
> - paddd xm6, xm8
> - movhlps xm7, xm6
> - paddd xm6, xm7
> - pshufd xm7, xm6, 1
> - paddd xm6, xm7
> - movd eax, xm6
> - RET
> -
> -cglobal pixel_satd_32x24, 4,8,10 ; if WIN64 && cpuflag(avx2)
> - mova m7, [hmul_8p]
> + vextracti128 xm0, m6, 1
> + paddw xm0, xm6
> + pmaddwd xm0, [pw_1]
> + movhlps xm7, xm0
> + paddd xm0, xm7
> + pshuflw xm7, xm0, q0032
> + paddd xm0, xm7
> + movd eax, xm0
> + RET
> +
> +cglobal pixel_satd_32x24, 4,8,8 ; if WIN64 && cpuflag(avx2)
> + mova m7, [hmul_16p]
> lea r4, [3 * r1]
> lea r5, [3 * r3]
> pxor m6, m6
> @@ -10726,23 +10676,26 @@
> call calc_satd_16x8
> call calc_satd_16x8
> call calc_satd_16x8
> +
> lea r0, [r6 + 16]
> lea r2, [r7 + 16]
> +
> call calc_satd_16x8
> call calc_satd_16x8
> call calc_satd_16x8
>
> - vextracti128 xm8, m6, 1
> - paddd xm6, xm8
> - movhlps xm7, xm6
> - paddd xm6, xm7
> - pshufd xm7, xm6, 1
> - paddd xm6, xm7
> - movd eax, xm6
> - RET
> -
> -cglobal pixel_satd_32x32, 4,8,10 ; if WIN64 && cpuflag(avx2)
> - mova m7, [hmul_8p]
> + vextracti128 xm0, m6, 1
> + paddw xm0, xm6
> + pmaddwd xm0, [pw_1]
> + movhlps xm7, xm0
> + paddd xm0, xm7
> + pshuflw xm7, xm0, q0032
> + paddd xm0, xm7
> + movd eax, xm0
> + RET
> +
> +cglobal pixel_satd_32x32, 4,8,8 ; if WIN64 && cpuflag(avx2)
> + mova m7, [hmul_16p]
> lea r4, [3 * r1]
> lea r5, [3 * r3]
> pxor m6, m6
> @@ -10753,24 +10706,27 @@
> call calc_satd_16x8
> call calc_satd_16x8
> call calc_satd_16x8
> +
> lea r0, [r6 + 16]
> lea r2, [r7 + 16]
> +
> call calc_satd_16x8
> call calc_satd_16x8
> call calc_satd_16x8
> call calc_satd_16x8
>
> - vextracti128 xm8, m6, 1
> - paddd xm6, xm8
> - movhlps xm7, xm6
> - paddd xm6, xm7
> - pshufd xm7, xm6, 1
> - paddd xm6, xm7
> - movd eax, xm6
> - RET
> -
> -cglobal pixel_satd_32x64, 4,8,10 ; if WIN64 && cpuflag(avx2)
> - mova m7, [hmul_8p]
> + vextracti128 xm0, m6, 1
> + paddw xm0, xm6
> + pmaddwd xm0, [pw_1]
> + movhlps xm7, xm0
> + paddd xm0, xm7
> + pshuflw xm7, xm0, q0032
> + paddd xm0, xm7
> + movd eax, xm0
> + RET
> +
> +cglobal pixel_satd_32x64, 4,8,9 ; if WIN64 && cpuflag(avx2)
> + mova m7, [hmul_16p]
> lea r4, [3 * r1]
> lea r5, [3 * r3]
> pxor m6, m6
> @@ -10785,8 +10741,13 @@
> call calc_satd_16x8
> call calc_satd_16x8
> call calc_satd_16x8
> +
> + mova m8, m6 ; to avoid overflow, move to another register
> + pxor m6, m6
> +
> lea r0, [r6 + 16]
> lea r2, [r7 + 16]
> +
> call calc_satd_16x8
> call calc_satd_16x8
> call calc_satd_16x8
> @@ -10796,13 +10757,18 @@
> call calc_satd_16x8
> call calc_satd_16x8
>
> - vextracti128 xm8, m6, 1
> - paddd xm6, xm8
> - movhlps xm7, xm6
> - paddd xm6, xm7
> - pshufd xm7, xm6, 1
> - paddd xm6, xm7
> - movd eax, xm6
> + vextracti128 xm1, m8, 1
> + vextracti128 xm0, m6, 1
> + paddw xm1, xm8
> + paddw xm0, xm6
> + pmaddwd xm1, [pw_1]
> + pmaddwd xm0, [pw_1]
> + paddd xm0, xm1
> + movhlps xm7, xm0
> + paddd xm0, xm7
> + pshuflw xm7, xm0, q0032
> + paddd xm0, xm7
> + movd eax, xm0
> RET
>
> %endif ; if ARCH_X86_64 == 1
<span class="HOEnZb"><font color="#888888"><br>
--<br>
Steve Borho<br>
_______________________________________________<br>
x265-devel mailing list<br>
<a href="mailto:x265-devel@videolan.org">x265-devel@videolan.org</a><br>
<a href="https://mailman.videolan.org/listinfo/x265-devel" target="_blank">https://mailman.videolan.org/listinfo/x265-devel</a><br>
</font></span></blockquote></div><br></div>