[x265] [PATCH] asm: avx2 code for sad_x4_48x64, more than 25% faster than SSE

rajesh at multicorewareinc.com
Tue Sep 15 10:53:22 CEST 2015


# HG changeset patch
# User Rajesh Paulraj<rajesh at multicorewareinc.com>
# Date 1442296837 -19800
#      Tue Sep 15 11:30:37 2015 +0530
# Node ID 01a21374067d5ee6f4b4fe6eb512afeb44974f39
# Parent  5fd2ef7bbf09f771d479a11eec2256d02fadf1cf
asm: avx2 code for sad_x4_48x64, more than 25% faster than SSE

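For readers not familiar with the primitive: sad_x4 computes the SAD of one 48x64 fenc block against four reference candidates in a single call. A minimal scalar sketch of that contract follows (illustration only, not part of the patch; it assumes the usual x265 sad_x4 signature, with fenc laid out on the fixed FENC_STRIDE and the four references sharing one stride):

    #include <cstdint>
    #include <cstdlib>

    // Scalar reference for the 48x64 four-way SAD (illustrative, not the patch code).
    static void sad_x4_48x64_ref(const uint8_t* fenc,
                                 const uint8_t* ref0, const uint8_t* ref1,
                                 const uint8_t* ref2, const uint8_t* ref3,
                                 intptr_t frefstride, int32_t* res)
    {
        const uint8_t* refs[4] = { ref0, ref1, ref2, ref3 };
        res[0] = res[1] = res[2] = res[3] = 0;
        for (int y = 0; y < 64; y++)
        {
            for (int x = 0; x < 48; x++)
                for (int i = 0; i < 4; i++)
                    res[i] += std::abs(fenc[x] - refs[i][x]);
            fenc += 64;                      // FENC_STRIDE is 64 in x265
            for (int i = 0; i < 4; i++)
                refs[i] += frefstride;
        }
    }

The AVX2 kernel below computes the same four sums, covering each pair of rows with two full-width ymm loads plus one ymm built from the 16-byte tail of the first row and the 16-byte head of the second.
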
diff -r 5fd2ef7bbf09 -r 01a21374067d source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Tue Sep 15 11:25:28 2015 +0530
+++ b/source/common/x86/asm-primitives.cpp	Tue Sep 15 11:30:37 2015 +0530
@@ -2866,6 +2866,7 @@
         p.pu[LUMA_32x64].sad_x4 = PFX(pixel_sad_x4_32x64_avx2);
         p.pu[LUMA_32x24].sad_x4 = PFX(pixel_sad_x4_32x24_avx2);
         p.pu[LUMA_32x8].sad_x4 = PFX(pixel_sad_x4_32x8_avx2);
+        p.pu[LUMA_48x64].sad_x4 = PFX(pixel_sad_x4_48x64_avx2);
         p.pu[LUMA_64x16].sad_x4 = PFX(pixel_sad_x4_64x16_avx2);
         p.pu[LUMA_64x32].sad_x4 = PFX(pixel_sad_x4_64x32_avx2);
         p.pu[LUMA_64x48].sad_x4 = PFX(pixel_sad_x4_64x48_avx2);
diff -r 5fd2ef7bbf09 -r 01a21374067d source/common/x86/sad-a.asm
--- a/source/common/x86/sad-a.asm	Tue Sep 15 11:25:28 2015 +0530
+++ b/source/common/x86/sad-a.asm	Tue Sep 15 11:30:37 2015 +0530
@@ -3773,6 +3773,283 @@
     SAD_X4_64x8_AVX2
     PIXEL_SAD_X4_END_AVX2
     RET
+
+; Accumulate the SADs of eight 48-pixel rows against the four references.
+; m0-m3 hold one running sum per reference; r0 walks fenc (stride FENC_STRIDE),
+; r1-r4 walk the references (stride r5, with r7 = 3 * r5).
+%macro SAD_X4_48x8_AVX2 0
+    movu            m4, [r0]
+    movu            m5, [r1]
+    movu            m6, [r2]
+    movu            m7, [r3]
+    movu            m8, [r4]
+
+    psadbw          m9, m4, m5
+    paddd           m0, m9
+    psadbw          m5, m4, m6
+    paddd           m1, m5
+    psadbw          m6, m4, m7
+    paddd           m2, m6
+    psadbw          m4, m8
+    paddd           m3, m4
+
+    ; last 16 bytes of this row in the low lane, first 16 bytes of the next row in the high lane
+    movu            xm4, [r0 + mmsize]
+    movu            xm5, [r1 + mmsize]
+    movu            xm6, [r2 + mmsize]
+    movu            xm7, [r3 + mmsize]
+    movu            xm8, [r4 + mmsize]
+
+    vinserti128     m4, m4, [r0 + FENC_STRIDE], 1
+    vinserti128     m5, m5, [r1 + r5], 1
+    vinserti128     m6, m6, [r2 + r5], 1
+    vinserti128     m7, m7, [r3 + r5], 1
+    vinserti128     m8, m8, [r4 + r5], 1
+
+    psadbw          m9, m4, m5
+    paddd           m0, m9
+    psadbw          m5, m4, m6
+    paddd           m1, m5
+    psadbw          m6, m4, m7
+    paddd           m2, m6
+    psadbw          m4, m8
+    paddd           m3, m4
+
+    ; remaining 32 bytes (columns 16-47) of the second row
+    movu            m4, [r0 + FENC_STRIDE + mmsize/2]
+    movu            m5, [r1 + r5 + mmsize/2]
+    movu            m6, [r2 + r5 + mmsize/2]
+    movu            m7, [r3 + r5 + mmsize/2]
+    movu            m8, [r4 + r5 + mmsize/2]
+
+    psadbw          m9, m4, m5
+    paddd           m0, m9
+    psadbw          m5, m4, m6
+    paddd           m1, m5
+    psadbw          m6, m4, m7
+    paddd           m2, m6
+    psadbw          m4, m8
+    paddd           m3, m4
+
+    movu            m4, [r0 + FENC_STRIDE * 2]
+    movu            m5, [r1 + r5 * 2]
+    movu            m6, [r2 + r5 * 2]
+    movu            m7, [r3 + r5 * 2]
+    movu            m8, [r4 + r5 * 2]
+
+    psadbw          m9, m4, m5
+    paddd           m0, m9
+    psadbw          m5, m4, m6
+    paddd           m1, m5
+    psadbw          m6, m4, m7
+    paddd           m2, m6
+    psadbw          m4, m8
+    paddd           m3, m4
+
+    movu            xm4, [r0 + FENC_STRIDE * 2 + mmsize]
+    movu            xm5, [r1 + r5 * 2 + mmsize]
+    movu            xm6, [r2 + r5 * 2 + mmsize]
+    movu            xm7, [r3 + r5 * 2 + mmsize]
+    movu            xm8, [r4 + r5 * 2 + mmsize]
+    vinserti128     m4, m4, [r0 + FENC_STRIDE * 3], 1
+    vinserti128     m5, m5, [r1 + r7], 1
+    vinserti128     m6, m6, [r2 + r7], 1
+    vinserti128     m7, m7, [r3 + r7], 1
+    vinserti128     m8, m8, [r4 + r7], 1
+
+    psadbw          m9, m4, m5
+    paddd           m0, m9
+    psadbw          m5, m4, m6
+    paddd           m1, m5
+    psadbw          m6, m4, m7
+    paddd           m2, m6
+    psadbw          m4, m8
+    paddd           m3, m4
+
+    movu            m4, [r0 + FENC_STRIDE * 3 + mmsize/2]
+    movu            m5, [r1 + r7 + mmsize/2]
+    movu            m6, [r2 + r7 + mmsize/2]
+    movu            m7, [r3 + r7 + mmsize/2]
+    movu            m8, [r4 + r7 + mmsize/2]
+
+    psadbw          m9, m4, m5
+    paddd           m0, m9
+    psadbw          m5, m4, m6
+    paddd           m1, m5
+    psadbw          m6, m4, m7
+    paddd           m2, m6
+    psadbw          m4, m8
+    paddd           m3, m4
+
+    ; step down four rows; the second half of the macro covers rows 4-7 the same way
+    add             r0, FENC_STRIDE * 4
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+
+    movu            m4, [r0]
+    movu            m5, [r1]
+    movu            m6, [r2]
+    movu            m7, [r3]
+    movu            m8, [r4]
+
+    psadbw          m9, m4, m5
+    paddd           m0, m9
+    psadbw          m5, m4, m6
+    paddd           m1, m5
+    psadbw          m6, m4, m7
+    paddd           m2, m6
+    psadbw          m4, m8
+    paddd           m3, m4
+
+    movu            xm4, [r0 + mmsize]
+    movu            xm5, [r1 + mmsize]
+    movu            xm6, [r2 + mmsize]
+    movu            xm7, [r3 + mmsize]
+    movu            xm8, [r4 + mmsize]
+    vinserti128     m4, m4, [r0 + FENC_STRIDE], 1
+    vinserti128     m5, m5, [r1 + r5], 1
+    vinserti128     m6, m6, [r2 + r5], 1
+    vinserti128     m7, m7, [r3 + r5], 1
+    vinserti128     m8, m8, [r4 + r5], 1
+
+    psadbw          m9, m4, m5
+    paddd           m0, m9
+    psadbw          m5, m4, m6
+    paddd           m1, m5
+    psadbw          m6, m4, m7
+    paddd           m2, m6
+    psadbw          m4, m8
+    paddd           m3, m4
+
+    movu            m4, [r0 + FENC_STRIDE + mmsize/2]
+    movu            m5, [r1 + r5 + mmsize/2]
+    movu            m6, [r2 + r5 + mmsize/2]
+    movu            m7, [r3 + r5 + mmsize/2]
+    movu            m8, [r4 + r5 + mmsize/2]
+
+    psadbw          m9, m4, m5
+    paddd           m0, m9
+    psadbw          m5, m4, m6
+    paddd           m1, m5
+    psadbw          m6, m4, m7
+    paddd           m2, m6
+    psadbw          m4, m8
+    paddd           m3, m4
+
+    movu            m4, [r0 + FENC_STRIDE * 2]
+    movu            m5, [r1 + r5 * 2]
+    movu            m6, [r2 + r5 * 2]
+    movu            m7, [r3 + r5 * 2]
+    movu            m8, [r4 + r5 * 2]
+
+    psadbw          m9, m4, m5
+    paddd           m0, m9
+    psadbw          m5, m4, m6
+    paddd           m1, m5
+    psadbw          m6, m4, m7
+    paddd           m2, m6
+    psadbw          m4, m8
+    paddd           m3, m4
+
+    movu            xm4, [r0 + FENC_STRIDE * 2 + mmsize]
+    movu            xm5, [r1 + r5 * 2 + mmsize]
+    movu            xm6, [r2 + r5 * 2 + mmsize]
+    movu            xm7, [r3 + r5 * 2 + mmsize]
+    movu            xm8, [r4 + r5 * 2 + mmsize]
+    vinserti128     m4, m4, [r0 + FENC_STRIDE * 3], 1
+    vinserti128     m5, m5, [r1 + r7], 1
+    vinserti128     m6, m6, [r2 + r7], 1
+    vinserti128     m7, m7, [r3 + r7], 1
+    vinserti128     m8, m8, [r4 + r7], 1
+
+    psadbw          m9, m4, m5
+    paddd           m0, m9
+    psadbw          m5, m4, m6
+    paddd           m1, m5
+    psadbw          m6, m4, m7
+    paddd           m2, m6
+    psadbw          m4, m8
+    paddd           m3, m4
+
+    movu            m4, [r0 + FENC_STRIDE * 3 + mmsize/2]
+    movu            m5, [r1 + r7 + mmsize/2]
+    movu            m6, [r2 + r7 + mmsize/2]
+    movu            m7, [r3 + r7 + mmsize/2]
+    movu            m8, [r4 + r7 + mmsize/2]
+
+    psadbw          m9, m4, m5
+    paddd           m0, m9
+    psadbw          m5, m4, m6
+    paddd           m1, m5
+    psadbw          m6, m4, m7
+    paddd           m2, m6
+    psadbw          m4, m8
+    paddd           m3, m4
+%endmacro
+
+INIT_YMM avx2
+; Four-candidate SAD for 48x64: m0-m3 accumulate one sum per reference over
+; eight 8-row passes; PIXEL_SAD_X4_END_AVX2 reduces and stores the results.
+cglobal pixel_sad_x4_48x64, 7,8,10
+    pxor            m0, m0
+    pxor            m1, m1
+    pxor            m2, m2
+    pxor            m3, m3
+    lea             r7, [r5 * 3]                ; r7 = 3 * reference stride
+
+    SAD_X4_48x8_AVX2
+
+    add             r0, FENC_STRIDE * 4
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+
+    SAD_X4_48x8_AVX2
+
+    add             r0, FENC_STRIDE * 4
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+
+    SAD_X4_48x8_AVX2
+
+    add             r0, FENC_STRIDE * 4
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+
+    SAD_X4_48x8_AVX2
+
+    add             r0, FENC_STRIDE * 4
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+
+    SAD_X4_48x8_AVX2
+
+    add             r0, FENC_STRIDE * 4
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+
+    SAD_X4_48x8_AVX2
+
+    add             r0, FENC_STRIDE * 4
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+
+    SAD_X4_48x8_AVX2
+
+    add             r0, FENC_STRIDE * 4
+    lea             r1, [r1 + r5 * 4]
+    lea             r2, [r2 + r5 * 4]
+    lea             r3, [r3 + r5 * 4]
+    lea             r4, [r4 + r5 * 4]
+
+    SAD_X4_48x8_AVX2
+    PIXEL_SAD_X4_END_AVX2
+    RET
 %endif
 
 INIT_XMM sse2

