[x265] [PATCH 2 of 3] asm: avx2 code for sad_x3_32xN, over 40% faster than SSE
chen
chenm003 at 163.com
Thu Sep 10 19:26:26 CEST 2015
Just a little modification, noted inline below:
At 2015-09-10 20:03:54, dnyaneshwar at multicorewareinc.com wrote:
># HG changeset patch
># User Dnyaneshwar G <dnyaneshwar at multicorewareinc.com>
># Date 1441885683 -19800
># Thu Sep 10 17:18:03 2015 +0530
># Node ID 5b5d7438e90196d7974b9ceec2130b6c924e2342
># Parent abab4304e992b7addb65ad8fbdfe309ba57732a6
>asm: avx2 code for sad_x3_32xN, over 40% faster than SSE
>
>diff -r abab4304e992 -r 5b5d7438e901 source/common/x86/asm-primitives.cpp
>--- a/source/common/x86/asm-primitives.cpp Thu Sep 10 11:40:35 2015 +0530
>+++ b/source/common/x86/asm-primitives.cpp Thu Sep 10 17:18:03 2015 +0530
>@@ -3571,6 +3571,12 @@
> p.cu[BLOCK_64x64].copy_ps = PFX(blockcopy_ps_64x64_avx2);
> p.planeClipAndMax = PFX(planeClipAndMax_avx2);
>
>+ p.pu[LUMA_32x8].sad_x3 = PFX(pixel_sad_x3_32x8_avx2);
>+ p.pu[LUMA_32x16].sad_x3 = PFX(pixel_sad_x3_32x16_avx2);
>+ p.pu[LUMA_32x24].sad_x3 = PFX(pixel_sad_x3_32x24_avx2);
>+ p.pu[LUMA_32x32].sad_x3 = PFX(pixel_sad_x3_32x32_avx2);
>+ p.pu[LUMA_32x64].sad_x3 = PFX(pixel_sad_x3_32x64_avx2);
>+
> /* The following primitives have been disabled since performance compared to SSE is negligible/negative */
> #if 0
> p.pu[LUMA_8x4].addAvg = PFX(addAvg_8x4_avx2);
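
(For context: sad_x3 scores one encoder block against three candidate reference blocks in a single call, writing the three sums to res[0..2]. Below is a minimal scalar sketch of those semantics; the signature and the fixed fenc stride are assumed from the x265 primitive tables, so treat the names as illustrative rather than the actual x265 source:

    #include <cstdint>
    #include <cstdlib>

    // Scalar reference for pixel_sad_x3_WxH (sketch, not the x265 code).
    // The fenc block sits at a fixed stride; the three references share one.
    template<int W, int H>
    void sad_x3_ref(const uint8_t* fenc,
                    const uint8_t* fref0, const uint8_t* fref1,
                    const uint8_t* fref2, intptr_t frefstride, int32_t* res)
    {
        const intptr_t fencstride = 64;   // FENC_STRIDE in x265
        res[0] = res[1] = res[2] = 0;
        for (int y = 0; y < H; y++)
        {
            for (int x = 0; x < W; x++)
            {
                res[0] += std::abs(fenc[x] - fref0[x]);
                res[1] += std::abs(fenc[x] - fref1[x]);
                res[2] += std::abs(fenc[x] - fref2[x]);
            }
            fenc  += fencstride;
            fref0 += frefstride;
            fref1 += frefstride;
            fref2 += frefstride;
        }
    }

pixel_sad_x3_32x8_avx2 above corresponds to W=32, H=8, and likewise for the other 32xN sizes.)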
>diff -r abab4304e992 -r 5b5d7438e901 source/common/x86/sad-a.asm
>--- a/source/common/x86/sad-a.asm Thu Sep 10 11:40:35 2015 +0530
>+++ b/source/common/x86/sad-a.asm Thu Sep 10 17:18:03 2015 +0530
>@@ -3949,6 +3949,272 @@
> movd [r5 + 8], xm1
> RET
>
>+%if ARCH_X86_64 == 1 && HIGH_BIT_DEPTH == 0
>+INIT_YMM avx2
>+%macro SAD_X3_32x8_AVX2 0
>+ movu m3, [r0]
>+ movu m4, [r1]
>+ movu m5, [r2]
>+ movu m6, [r3]
>+
>+ psadbw m7, m3, m4
>+ paddd m0, m7
>+ psadbw m7, m3, m5
>+ paddd m1, m7
>+ psadbw m3, m6
>+ paddd m2, m3
>+
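(Note: each psadbw produces, in every 64-bit lane, the sum of absolute differences of eight byte pairs, so one 32-byte YMM row yields four partial sums; m0, m1 and m2 carry the running totals of the fenc row against [r1], [r2] and [r3].)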
>+ movu m3, [r0 + FENC_STRIDE]
>+ movu m4, [r1 + r4]
>+ movu m5, [r2 + r4]
>+ movu m6, [r3 + r4]
>+
>+ psadbw m7, m3, m4
>+ paddd m0, m7
>+ psadbw m4, m3, m5
>+ paddd m1, m4
>+ psadbw m3, m6
>+ paddd m2, m3
>+
>+ movu m3, [r0 + FENC_STRIDE * 2]
>+ movu m4, [r1 + r4 * 2]
>+ movu m5, [r2 + r4 * 2]
>+ movu m6, [r3 + r4 * 2]
>+
>+ psadbw m7, m3, m4
>+ paddd m0, m7
>+ psadbw m4, m3, m5
>+ paddd m1, m4
>+ psadbw m3, m6
>+ paddd m2, m3
>+
>+ movu m3, [r0 + FENC_STRIDE * 3]
>+ movu m4, [r1 + r6]
>+ movu m5, [r2 + r6]
>+ movu m6, [r3 + r6]
>+
>+ psadbw m7, m3, m4
>+ paddd m0, m7
>+ psadbw m4, m3, m5
>+ paddd m1, m4
>+ psadbw m3, m6
>+ paddd m2, m3
>+
>+ lea r0, [r0 + FENC_STRIDE * 4]
ADD is enough here: FENC_STRIDE * 4 is a constant (see the sketch after the hunk).
>+ lea r1, [r1 + r4 * 4]
>+ lea r2, [r2 + r4 * 4]
>+ lea r3, [r3 + r4 * 4]
>+
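A sketch of the suggested tweak: since FENC_STRIDE is a build-time constant, only the fenc pointer update can drop the LEA, while the reference pointers still need the r4 scaling, so something like:

    add  r0, FENC_STRIDE * 4    ; fenc stride is a constant, plain ADD suffices
    lea  r1, [r1 + r4 * 4]      ; reference stride lives in r4, keep LEA
    lea  r2, [r2 + r4 * 4]
    lea  r3, [r3 + r4 * 4]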