[x265] [PATCH] asm: avx2 code for sad_x3_32xN, over 40% faster than SSE

chen chenm003 at 163.com
Fri Sep 25 16:54:31 CEST 2015




At 2015-09-25 12:53:59, dnyaneshwar at multicorewareinc.com wrote:
># HG changeset patch
># User Dnyaneshwar G <dnyaneshwar at multicorewareinc.com>
># Date 1443156551 -19800
>#      Fri Sep 25 10:19:11 2015 +0530
># Node ID 310d35ed0ba85174676d0b0bb91e6b8b5f475726
># Parent  975352b2c0223b9139aad233b43eaf2113ac8167
>asm: avx2 code for sad_x3_32xN, over 40% faster than SSE
>
>diff -r 975352b2c022 -r 310d35ed0ba8 source/common/x86/asm-primitives.cpp
>--- a/source/common/x86/asm-primitives.cpp	Wed Sep 23 16:19:48 2015 +0530
>+++ b/source/common/x86/asm-primitives.cpp	Fri Sep 25 10:19:11 2015 +0530
>@@ -3587,6 +3587,12 @@
>         p.cu[BLOCK_64x64].copy_ps = PFX(blockcopy_ps_64x64_avx2);
>         p.planeClipAndMax = PFX(planeClipAndMax_avx2);
> 
>+        p.pu[LUMA_32x8].sad_x3 = PFX(pixel_sad_x3_32x8_avx2);
>+        p.pu[LUMA_32x16].sad_x3 = PFX(pixel_sad_x3_32x16_avx2);
>+        p.pu[LUMA_32x24].sad_x3 = PFX(pixel_sad_x3_32x24_avx2);
>+        p.pu[LUMA_32x32].sad_x3 = PFX(pixel_sad_x3_32x32_avx2);
>+        p.pu[LUMA_32x64].sad_x3 = PFX(pixel_sad_x3_32x64_avx2);
>+
>         /* The following primitives have been disabled since performance compared to SSE is negligible/negative */
> #if 0
>         p.pu[LUMA_8x4].addAvg = PFX(addAvg_8x4_avx2);
>diff -r 975352b2c022 -r 310d35ed0ba8 source/common/x86/sad-a.asm
>--- a/source/common/x86/sad-a.asm	Wed Sep 23 16:19:48 2015 +0530
>+++ b/source/common/x86/sad-a.asm	Fri Sep 25 10:19:11 2015 +0530
>@@ -4674,6 +4674,272 @@
>     movd            [r5 + 8], xm1
>     RET
> 
>+%if ARCH_X86_64 == 1 && HIGH_BIT_DEPTH == 0
>+INIT_YMM avx2
>+%macro SAD_X3_32x8_AVX2 0
>+    movu            m3, [r0]
>+    movu            m4, [r1]
>+    movu            m5, [r2]
>+    movu            m6, [r3]
>+
>+    psadbw          m7, m3, m4
>+    paddd           m0, m7
>+    psadbw          m7, m3, m5
>+    paddd           m1, m7
>+    psadbw          m3, m6
>+    paddd           m2, m3
>+
>+    movu            m3, [r0 + FENC_STRIDE]
>+    movu            m4, [r1 + r4]
>+    movu            m5, [r2 + r4]
>+    movu            m6, [r3 + r4]
>+
>+    psadbw          m7, m3, m4
>+    paddd           m0, m7
>+    psadbw          m4, m3, m5
>+    paddd           m1, m4
>+    psadbw          m3, m6
>+    paddd           m2, m3
>+
>+    movu            m3, [r0 + FENC_STRIDE * 2]
>+    movu            m4, [r1 + r4 * 2]
>+    movu            m5, [r2 + r4 * 2]
>+    movu            m6, [r3 + r4 * 2]
>+
>+    psadbw          m7, m3, m4
>+    paddd           m0, m7
>+    psadbw          m4, m3, m5
>+    paddd           m1, m4
>+    psadbw          m3, m6
>+    paddd           m2, m3
>+
>+    movu            m3, [r0 + FENC_STRIDE * 3]
>+    movu            m4, [r1 + r6]
>+    movu            m5, [r2 + r6]
>+    movu            m6, [r3 + r6]
>+
>+    psadbw          m7, m3, m4
>+    paddd           m0, m7
>+    psadbw          m4, m3, m5
>+    paddd           m1, m4
>+    psadbw          m3, m6
>+    paddd           m2, m3
>+
>+    add             r0, FENC_STRIDE * 4
>+    lea             r1, [r1 + r4 * 4]
>+    lea             r2, [r2 + r4 * 4]
>+    lea             r3, [r3 + r4 * 4]
>+
>+    movu            m3, [r0]
>+    movu            m4, [r1]
>+    movu            m5, [r2]
>+    movu            m6, [r3]
>+
>+    psadbw          m7, m3, m4
>+    paddd           m0, m7
>+    psadbw          m4, m3, m5
>+    paddd           m1, m4
>+    psadbw          m3, m6
>+    paddd           m2, m3
>+
>+    movu            m3, [r0 + FENC_STRIDE]
>+    movu            m4, [r1 + r4]
>+    movu            m5, [r2 + r4]
>+    movu            m6, [r3 + r4]
>+
>+    psadbw          m7, m3, m4
>+    paddd           m0, m7
>+    psadbw          m4, m3, m5
>+    paddd           m1, m4
>+    psadbw          m3, m6
>+    paddd           m2, m3
>+
>+    movu            m3, [r0 + FENC_STRIDE * 2]
>+    movu            m4, [r1 + r4 * 2]
>+    movu            m5, [r2 + r4 * 2]
>+    movu            m6, [r3 + r4 * 2]
>+
>+    psadbw          m7, m3, m4
>+    paddd           m0, m7
>+    psadbw          m4, m3, m5
>+    paddd           m1, m4
>+    psadbw          m3, m6
>+    paddd           m2, m3
>+
>+    movu            m3, [r0 + FENC_STRIDE * 3]
>+    movu            m4, [r1 + r6]
>+    movu            m5, [r2 + r6]
>+    movu            m6, [r3 + r6]
>+
>+    psadbw          m7, m3, m4
>+    paddd           m0, m7
>+    psadbw          m4, m3, m5
>+    paddd           m1, m4
>+    psadbw          m3, m6
>+    paddd           m2, m3
>+%endmacro
>+
>+%macro PIXEL_SAD_X3_END_AVX2 0
>+    vextracti128   xm3, m0, 1
>+    vextracti128   xm4, m1, 1
>+    vextracti128   xm5, m2, 1
>+    paddd           m0, m3
>+    paddd           m1, m4
>+    paddd           m2, m5
>+    pshufd         xm3, xm0, 2
>+    pshufd         xm4, xm1, 2
>+    pshufd         xm5, xm2, 2
>+    paddd           m0, m3
>+    paddd           m1, m4
>+    paddd           m2, m5

This sequence is equivalent to the existing HADDD macro; please use HADDD instead of open-coding the horizontal reduction.
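For reference, a minimal sketch of the same reduction written with HADDD
(assuming the two-operand HADDD macro from x86util.asm, sum register plus
scratch register, handles ymm inputs here the way it does in the other AVX2
routines in sad-a.asm):

    ; dwords 1 and 3 of each 128-bit half are zero after psadbw, so summing
    ; all dwords with HADDD gives the same totals as the
    ; vextracti128/pshufd/paddd sequence above
    HADDD           m0, m3
    HADDD           m1, m4
    HADDD           m2, m5

The stores of the three results (xm0/xm1/xm2 into the output array at [r5],
as in the epilogues visible earlier in sad-a.asm) would stay as they are in
the patch.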


