[x265] [PATCH 2 of 3] asm: avx2 code for sad_x3_32xN, over 40% faster than SSE

dnyaneshwar at multicorewareinc.com dnyaneshwar at multicorewareinc.com
Thu Sep 10 14:03:54 CEST 2015


# HG changeset patch
# User Dnyaneshwar G <dnyaneshwar at multicorewareinc.com>
# Date 1441885683 -19800
#      Thu Sep 10 17:18:03 2015 +0530
# Node ID 5b5d7438e90196d7974b9ceec2130b6c924e2342
# Parent  abab4304e992b7addb65ad8fbdfe309ba57732a6
asm: avx2 code for sad_x3_32xN, over 40% faster than SSE
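
Each sad_x3 primitive compares one encoder block against three candidate
reference blocks in a single call and writes the three SAD costs to
res[0..2]; the AVX2 routines below vectorize exactly that. As a minimal
scalar sketch of the semantics (the function name, parameter names and the
includes are illustrative only; 'pixel' and FENC_STRIDE are the usual x265
definitions, with pixel being uint8_t in the 8-bit build this code targets):

    #include <cstdint>
    #include <cstdlib>   // abs

    template<int lx, int ly>   // block width / height, e.g. <32, 16>
    static void sad_x3_sketch(const pixel* fenc, const pixel* fref0,
                              const pixel* fref1, const pixel* fref2,
                              intptr_t frefstride, int32_t* res)
    {
        res[0] = res[1] = res[2] = 0;
        for (int y = 0; y < ly; y++)
        {
            for (int x = 0; x < lx; x++)
            {
                res[0] += abs(fenc[x] - fref0[x]);
                res[1] += abs(fenc[x] - fref1[x]);
                res[2] += abs(fenc[x] - fref2[x]);
            }
            fenc  += FENC_STRIDE;   // fenc rows use the fixed FENC_STRIDE pitch
            fref0 += frefstride;    // the three references share one stride
            fref1 += frefstride;
            fref2 += frefstride;
        }
    }

The AVX2 version replaces the inner loop with psadbw over full 32-byte rows,
keeping one ymm accumulator per reference.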

diff -r abab4304e992 -r 5b5d7438e901 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Thu Sep 10 11:40:35 2015 +0530
+++ b/source/common/x86/asm-primitives.cpp	Thu Sep 10 17:18:03 2015 +0530
@@ -3571,6 +3571,12 @@
         p.cu[BLOCK_64x64].copy_ps = PFX(blockcopy_ps_64x64_avx2);
         p.planeClipAndMax = PFX(planeClipAndMax_avx2);
 
+        p.pu[LUMA_32x8].sad_x3 = PFX(pixel_sad_x3_32x8_avx2);
+        p.pu[LUMA_32x16].sad_x3 = PFX(pixel_sad_x3_32x16_avx2);
+        p.pu[LUMA_32x24].sad_x3 = PFX(pixel_sad_x3_32x24_avx2);
+        p.pu[LUMA_32x32].sad_x3 = PFX(pixel_sad_x3_32x32_avx2);
+        p.pu[LUMA_32x64].sad_x3 = PFX(pixel_sad_x3_32x64_avx2);
+
         /* The following primitives have been disabled since performance compared to SSE is negligible/negative */
 #if 0
         p.pu[LUMA_8x4].addAvg = PFX(addAvg_8x4_avx2);
diff -r abab4304e992 -r 5b5d7438e901 source/common/x86/sad-a.asm
--- a/source/common/x86/sad-a.asm	Thu Sep 10 11:40:35 2015 +0530
+++ b/source/common/x86/sad-a.asm	Thu Sep 10 17:18:03 2015 +0530
@@ -3949,6 +3949,272 @@
     movd            [r5 + 8], xm1
     RET
 
+%if ARCH_X86_64 == 1 && HIGH_BIT_DEPTH == 0
+INIT_YMM avx2
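+; SAD_X3_32x8_AVX2: accumulate SADs for 8 rows of a 32-pixel-wide block into
+; m0/m1/m2 (one accumulator per reference); on exit the row pointers have
+; advanced by only 4 rows, so callers step them another 4 rows before the
+; next invocation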
+%macro SAD_X3_32x8_AVX2 0
+    movu            m3, [r0]
+    movu            m4, [r1]
+    movu            m5, [r2]
+    movu            m6, [r3]
+
+    psadbw          m7, m3, m4
+    paddd           m0, m7
+    psadbw          m7, m3, m5
+    paddd           m1, m7
+    psadbw          m3, m6
+    paddd           m2, m3
+
+    movu            m3, [r0 + FENC_STRIDE]
+    movu            m4, [r1 + r4]
+    movu            m5, [r2 + r4]
+    movu            m6, [r3 + r4]
+
+    psadbw          m7, m3, m4
+    paddd           m0, m7
+    psadbw          m4, m3, m5
+    paddd           m1, m4
+    psadbw          m3, m6
+    paddd           m2, m3
+
+    movu            m3, [r0 + FENC_STRIDE * 2]
+    movu            m4, [r1 + r4 * 2]
+    movu            m5, [r2 + r4 * 2]
+    movu            m6, [r3 + r4 * 2]
+
+    psadbw          m7, m3, m4
+    paddd           m0, m7
+    psadbw          m4, m3, m5
+    paddd           m1, m4
+    psadbw          m3, m6
+    paddd           m2, m3
+
+    movu            m3, [r0 + FENC_STRIDE * 3]
+    movu            m4, [r1 + r6]
+    movu            m5, [r2 + r6]
+    movu            m6, [r3 + r6]
+
+    psadbw          m7, m3, m4
+    paddd           m0, m7
+    psadbw          m4, m3, m5
+    paddd           m1, m4
+    psadbw          m3, m6
+    paddd           m2, m3
+
+    lea             r0, [r0 + FENC_STRIDE * 4]
+    lea             r1, [r1 + r4 * 4]
+    lea             r2, [r2 + r4 * 4]
+    lea             r3, [r3 + r4 * 4]
+
+    movu            m3, [r0]
+    movu            m4, [r1]
+    movu            m5, [r2]
+    movu            m6, [r3]
+
+    psadbw          m7, m3, m4
+    paddd           m0, m7
+    psadbw          m4, m3, m5
+    paddd           m1, m4
+    psadbw          m3, m6
+    paddd           m2, m3
+
+    movu            m3, [r0 + FENC_STRIDE]
+    movu            m4, [r1 + r4]
+    movu            m5, [r2 + r4]
+    movu            m6, [r3 + r4]
+
+    psadbw          m7, m3, m4
+    paddd           m0, m7
+    psadbw          m4, m3, m5
+    paddd           m1, m4
+    psadbw          m3, m6
+    paddd           m2, m3
+
+    movu            m3, [r0 + FENC_STRIDE * 2]
+    movu            m4, [r1 + r4 * 2]
+    movu            m5, [r2 + r4 * 2]
+    movu            m6, [r3 + r4 * 2]
+
+    psadbw          m7, m3, m4
+    paddd           m0, m7
+    psadbw          m4, m3, m5
+    paddd           m1, m4
+    psadbw          m3, m6
+    paddd           m2, m3
+
+    movu            m3, [r0 + FENC_STRIDE * 3]
+    movu            m4, [r1 + r6]
+    movu            m5, [r2 + r6]
+    movu            m6, [r3 + r6]
+
+    psadbw          m7, m3, m4
+    paddd           m0, m7
+    psadbw          m4, m3, m5
+    paddd           m1, m4
+    psadbw          m3, m6
+    paddd           m2, m3
+%endmacro
+
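+; PIXEL_SAD_X3_END_AVX2: horizontally reduce the psadbw partial sums held in
+; m0/m1/m2 and store the three SAD costs as dwords to res[0..2] at r5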
+%macro PIXEL_SAD_X3_END_AVX2 0
+    vextracti128   xm3, m0, 1
+    vextracti128   xm4, m1, 1
+    vextracti128   xm5, m2, 1
+    paddd           m0, m3
+    paddd           m1, m4
+    paddd           m2, m5
+    pshufd         xm3, xm0, 2
+    pshufd         xm4, xm1, 2
+    pshufd         xm5, xm2, 2
+    paddd           m0, m3
+    paddd           m1, m4
+    paddd           m2, m5
+
+    movd            [r5 + 0], xm0
+    movd            [r5 + 4], xm1
+    movd            [r5 + 8], xm2
+%endmacro
+
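+; entry points: r0 = fenc (FENC_STRIDE pitch), r1/r2/r3 = reference blocks
+; sharing stride r4, r5 = int32_t res[3] output, r6 = scratch (set to r4 * 3)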
+cglobal pixel_sad_x3_32x8, 6,7,8
+    pxor            m0, m0
+    pxor            m1, m1
+    pxor            m2, m2
+    lea             r6, [r4 * 3]
+
+    SAD_X3_32x8_AVX2
+    PIXEL_SAD_X3_END_AVX2
+    RET
+
+cglobal pixel_sad_x3_32x16, 6,7,8
+    pxor            m0, m0
+    pxor            m1, m1
+    pxor            m2, m2
+    lea             r6, [r4 * 3]
+
+    SAD_X3_32x8_AVX2
+
+    lea             r0, [r0 + FENC_STRIDE * 4]
+    lea             r1, [r1 + r4 * 4]
+    lea             r2, [r2 + r4 * 4]
+    lea             r3, [r3 + r4 * 4]
+
+    SAD_X3_32x8_AVX2
+    PIXEL_SAD_X3_END_AVX2
+    RET
+
+cglobal pixel_sad_x3_32x24, 6,7,8
+    pxor            m0, m0
+    pxor            m1, m1
+    pxor            m2, m2
+    lea             r6, [r4 * 3]
+
+    SAD_X3_32x8_AVX2
+
+    lea             r0, [r0 + FENC_STRIDE * 4]
+    lea             r1, [r1 + r4 * 4]
+    lea             r2, [r2 + r4 * 4]
+    lea             r3, [r3 + r4 * 4]
+
+    SAD_X3_32x8_AVX2
+
+    lea             r0, [r0 + FENC_STRIDE * 4]
+    lea             r1, [r1 + r4 * 4]
+    lea             r2, [r2 + r4 * 4]
+    lea             r3, [r3 + r4 * 4]
+
+    SAD_X3_32x8_AVX2
+    PIXEL_SAD_X3_END_AVX2
+    RET
+
+cglobal pixel_sad_x3_32x32, 6,7,8
+    pxor            m0, m0
+    pxor            m1, m1
+    pxor            m2, m2
+    lea             r6, [r4 * 3]
+
+    SAD_X3_32x8_AVX2
+
+    lea             r0, [r0 + FENC_STRIDE * 4]
+    lea             r1, [r1 + r4 * 4]
+    lea             r2, [r2 + r4 * 4]
+    lea             r3, [r3 + r4 * 4]
+
+    SAD_X3_32x8_AVX2
+
+    lea             r0, [r0 + FENC_STRIDE * 4]
+    lea             r1, [r1 + r4 * 4]
+    lea             r2, [r2 + r4 * 4]
+    lea             r3, [r3 + r4 * 4]
+
+    SAD_X3_32x8_AVX2
+
+    lea             r0, [r0 + FENC_STRIDE * 4]
+    lea             r1, [r1 + r4 * 4]
+    lea             r2, [r2 + r4 * 4]
+    lea             r3, [r3 + r4 * 4]
+
+    SAD_X3_32x8_AVX2
+    PIXEL_SAD_X3_END_AVX2
+    RET
+
+cglobal pixel_sad_x3_32x64, 6,7,8
+    pxor            m0, m0
+    pxor            m1, m1
+    pxor            m2, m2
+    lea             r6, [r4 * 3]
+
+    SAD_X3_32x8_AVX2
+
+    lea             r0, [r0 + FENC_STRIDE * 4]
+    lea             r1, [r1 + r4 * 4]
+    lea             r2, [r2 + r4 * 4]
+    lea             r3, [r3 + r4 * 4]
+
+    SAD_X3_32x8_AVX2
+
+    lea             r0, [r0 + FENC_STRIDE * 4]
+    lea             r1, [r1 + r4 * 4]
+    lea             r2, [r2 + r4 * 4]
+    lea             r3, [r3 + r4 * 4]
+
+    SAD_X3_32x8_AVX2
+
+    lea             r0, [r0 + FENC_STRIDE * 4]
+    lea             r1, [r1 + r4 * 4]
+    lea             r2, [r2 + r4 * 4]
+    lea             r3, [r3 + r4 * 4]
+
+    SAD_X3_32x8_AVX2
+
+    lea             r0, [r0 + FENC_STRIDE * 4]
+    lea             r1, [r1 + r4 * 4]
+    lea             r2, [r2 + r4 * 4]
+    lea             r3, [r3 + r4 * 4]
+
+    SAD_X3_32x8_AVX2
+
+    lea             r0, [r0 + FENC_STRIDE * 4]
+    lea             r1, [r1 + r4 * 4]
+    lea             r2, [r2 + r4 * 4]
+    lea             r3, [r3 + r4 * 4]
+
+    SAD_X3_32x8_AVX2
+
+    lea             r0, [r0 + FENC_STRIDE * 4]
+    lea             r1, [r1 + r4 * 4]
+    lea             r2, [r2 + r4 * 4]
+    lea             r3, [r3 + r4 * 4]
+
+    SAD_X3_32x8_AVX2
+
+    lea             r0, [r0 + FENC_STRIDE * 4]
+    lea             r1, [r1 + r4 * 4]
+    lea             r2, [r2 + r4 * 4]
+    lea             r3, [r3 + r4 * 4]
+
+    SAD_X3_32x8_AVX2
+    PIXEL_SAD_X3_END_AVX2
+    RET
+%endif
+
 INIT_YMM avx2
 cglobal pixel_sad_x4_8x8, 7,7,5
     xorps           m0, m0

