[x265] [PATCH 057 of 307] [x265-avx512]x86: AVX512 pixel_sad_64xN for high bit depth

mythreyi at multicorewareinc.com mythreyi at multicorewareinc.com
Sat Apr 7 04:30:55 CEST 2018


# HG changeset patch
# User Gopi Satykrishna Akisetty <gopi.satykrishna at multicorewareinc.com>
# Date 1501653512 -19800
#      Wed Aug 02 11:28:32 2017 +0530
# Node ID b355ac2912dd111b96dbb5893b34405863e7382f
# Parent  784aff4e987c17e2ece9bd3484b256f97f3640f5
[x265-avx512]x86: AVX512 pixel_sad_64xN for high bit depth

Size    | AVX2 performance | AVX512 performance
------------------------------------------------
64x16   |     27.47x       |      43.37x
64x32   |     28.41x       |      46.45x
64x48   |     26.51x       |      48.47x
64x64   |     28.74x       |      48.76x

diff -r 784aff4e987c -r b355ac2912dd source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Wed Aug 02 11:28:32 2017 +0530
+++ b/source/common/x86/asm-primitives.cpp	Wed Aug 02 11:28:32 2017 +0530
@@ -2261,6 +2261,10 @@
         p.pu[LUMA_32x24].sad = PFX(pixel_sad_32x24_avx512);
         p.pu[LUMA_32x32].sad = PFX(pixel_sad_32x32_avx512);
         p.pu[LUMA_32x64].sad = PFX(pixel_sad_32x64_avx512);
+        p.pu[LUMA_64x16].sad = PFX(pixel_sad_64x16_avx512);
+        p.pu[LUMA_64x32].sad = PFX(pixel_sad_64x32_avx512);
+        p.pu[LUMA_64x48].sad = PFX(pixel_sad_64x48_avx512);
+        p.pu[LUMA_64x64].sad = PFX(pixel_sad_64x64_avx512);
 
         p.pu[LUMA_32x8].sad = PFX(pixel_sad_32x8_avx512);
         p.pu[LUMA_32x16].sad = PFX(pixel_sad_32x16_avx512);
diff -r 784aff4e987c -r b355ac2912dd source/common/x86/sad16-a.asm
--- a/source/common/x86/sad16-a.asm	Wed Aug 02 11:28:32 2017 +0530
+++ b/source/common/x86/sad16-a.asm	Wed Aug 02 11:28:32 2017 +0530
@@ -1234,6 +1234,86 @@
     paddd   m0, m1
 %endmacro
 
+
+%macro PROCESS_SAD_64x8_AVX512 0           ; SAD of a 64x8 uint16 block; dword partial sums accumulate in m0
+    movu    m1, [r2]                        ; row 0, pixels 0-31 (mmsize = 64 bytes = 32 uint16)
+    movu    m2, [r2 + mmsize]               ; row 0, pixels 32-63
+    movu    m3, [r2 + r3]                   ; row 1 (r3 = byte stride of second buffer)
+    movu    m4, [r2 + r3 + mmsize]
+    psubw   m1, [r0]                        ; difference vs first buffer (r1 = its byte stride)
+    psubw   m2, [r0 + mmsize]
+    psubw   m3, [r0 + r1]
+    psubw   m4, [r0 + r1 + mmsize]
+    pabsw   m1, m1                          ; absolute differences per word
+    pabsw   m2, m2
+    pabsw   m3, m3
+    pabsw   m4, m4
+    paddw   m1, m2
+    paddw   m3, m4
+    paddw   m5, m1, m3                      ; m5 = word sums for rows 0-1
+
+    movu    m1, [r2 + 2 * r3]               ; row 2
+    movu    m2, [r2 + 2 * r3 + mmsize]
+    movu    m3, [r2 + r5]                   ; row 3 (r5 = 3 * r3, set up by caller)
+    movu    m4, [r2 + r5 + mmsize]
+    psubw   m1, [r0 + 2 * r1]
+    psubw   m2, [r0 + 2 * r1 + mmsize]
+    psubw   m3, [r0 + r4]                   ; r4 = 3 * r1, set up by caller
+    psubw   m4, [r0 + r4 + mmsize]
+    pabsw   m1, m1
+    pabsw   m2, m2
+    pabsw   m3, m3
+    pabsw   m4, m4
+    paddw   m1, m2
+    paddw   m3, m4
+    paddw   m1, m3                          ; m1 = word sums for rows 2-3
+
+    lea     r0, [r0 + 4 * r1]               ; advance both buffers to row 4
+    lea     r2, [r2 + 4 * r3]
+
+    pmaddwd m5, m6                          ; widen word sums to dwords (m6 = pw_1) before accumulating
+    paddd   m0, m5                          ; accumulate rows 0-1
+    pmaddwd m1, m6
+    paddd   m0, m1                          ; accumulate rows 2-3
+
+    movu    m1, [r2]                        ; row 4 (pointers were advanced above)
+    movu    m2, [r2 + mmsize]
+    movu    m3, [r2 + r3]                   ; row 5
+    movu    m4, [r2 + r3 + mmsize]
+    psubw   m1, [r0]
+    psubw   m2, [r0 + mmsize]
+    psubw   m3, [r0 + r1]
+    psubw   m4, [r0 + r1 + mmsize]
+    pabsw   m1, m1
+    pabsw   m2, m2
+    pabsw   m3, m3
+    pabsw   m4, m4
+    paddw   m1, m2
+    paddw   m3, m4
+    paddw   m5, m1, m3                      ; m5 = word sums for rows 4-5
+
+    movu    m1, [r2 + 2 * r3]               ; row 6
+    movu    m2, [r2 + 2 * r3 + mmsize]
+    movu    m3, [r2 + r5]                   ; row 7
+    movu    m4, [r2 + r5 + mmsize]
+    psubw   m1, [r0 + 2 * r1]
+    psubw   m2, [r0 + 2 * r1 + mmsize]
+    psubw   m3, [r0 + r4]
+    psubw   m4, [r0 + r4 + mmsize]
+    pabsw   m1, m1
+    pabsw   m2, m2
+    pabsw   m3, m3
+    pabsw   m4, m4
+    paddw   m1, m2
+    paddw   m3, m4
+    paddw   m1, m3                          ; m1 = word sums for rows 6-7
+
+    pmaddwd m5, m6                          ; accumulate rows 4-7 into m0
+    paddd   m0, m5
+    pmaddwd m1, m6
+    paddd   m0, m1
+%endmacro                                   ; NOTE: exits with r0/r2 advanced only 4 rows; callers add the other 4
+
 %macro PROCESS_SAD_32x8_AVX512 0
     movu    m1, [r2]
     movu    m2, [r2 + r3]
@@ -1572,6 +1652,116 @@
     PROCESS_SAD_AVX512_END
     RET
 
+;-----------------------------------------------------------------------------
+; int pixel_sad_64x%1( uint16_t *, intptr_t, uint16_t *, intptr_t )
+;-----------------------------------------------------------------------------
+INIT_ZMM avx512
+cglobal pixel_sad_64x16, 4,6,7
+    pxor    m0, m0
+
+    vbroadcasti32x8 m6, [pw_1]
+
+    add     r3d, r3d
+    add     r1d, r1d
+    lea     r4d, [r1 * 3]
+    lea     r5d, [r3 * 3]
+
+    PROCESS_SAD_64x8_AVX512
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_64x8_AVX512
+    PROCESS_SAD_AVX512_END
+    RET
+
+INIT_ZMM avx512
+cglobal pixel_sad_64x32, 4,6,7              ; 4 args, 6 GPRs, 7 vector regs
+    pxor    m0, m0                          ; m0 = dword SAD accumulator
+
+    vbroadcasti32x8 m6, [pw_1]              ; m6 = all-ones words for pmaddwd widening
+
+    add     r3d, r3d                        ; strides: uint16 pixel units -> bytes
+    add     r1d, r1d
+    lea     r4d, [r1 * 3]                   ; r4 = 3 * stride1
+    lea     r5d, [r3 * 3]                   ; r5 = 3 * stride2
+
+    PROCESS_SAD_64x8_AVX512                 ; rows 0-7; each lea pair below covers
+    lea            r2, [r2 + 4 * r3]        ; the 4 rows the macro does not advance
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_64x8_AVX512                 ; rows 8-15
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_64x8_AVX512                 ; rows 16-23
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_64x8_AVX512                 ; rows 24-31
+    PROCESS_SAD_AVX512_END                  ; reduce m0 to the scalar return value
+    RET
+
+INIT_ZMM avx512
+cglobal pixel_sad_64x48, 4,6,7              ; 4 args, 6 GPRs, 7 vector regs
+    pxor    m0, m0                          ; m0 = dword SAD accumulator
+
+    vbroadcasti32x8 m6, [pw_1]              ; m6 = all-ones words for pmaddwd widening
+
+    add     r3d, r3d                        ; strides: uint16 pixel units -> bytes
+    add     r1d, r1d
+    lea     r4d, [r1 * 3]                   ; r4 = 3 * stride1
+    lea     r5d, [r3 * 3]                   ; r5 = 3 * stride2
+
+    PROCESS_SAD_64x8_AVX512                 ; rows 0-7; each lea pair below covers
+    lea            r2, [r2 + 4 * r3]        ; the 4 rows the macro does not advance
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_64x8_AVX512                 ; rows 8-15
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_64x8_AVX512                 ; rows 16-23
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_64x8_AVX512                 ; rows 24-31
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_64x8_AVX512                 ; rows 32-39
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_64x8_AVX512                 ; rows 40-47
+    PROCESS_SAD_AVX512_END                  ; reduce m0 to the scalar return value
+    RET
+
+INIT_ZMM avx512
+cglobal pixel_sad_64x64, 4,6,7              ; 4 args, 6 GPRs, 7 vector regs
+    pxor    m0, m0                          ; m0 = dword SAD accumulator (indent fixed to match siblings)
+
+    vbroadcasti32x8 m6, [pw_1]              ; m6 = all-ones words for pmaddwd widening
+
+    add     r3d, r3d                        ; strides: uint16 pixel units -> bytes
+    add     r1d, r1d
+    lea     r4d, [r1 * 3]                   ; r4 = 3 * stride1
+    lea     r5d, [r3 * 3]                   ; r5 = 3 * stride2
+
+    PROCESS_SAD_64x8_AVX512                 ; rows 0-7; each lea pair below covers
+    lea            r2, [r2 + 4 * r3]        ; the 4 rows the macro does not advance
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_64x8_AVX512                 ; rows 8-15
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_64x8_AVX512                 ; rows 16-23
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_64x8_AVX512                 ; rows 24-31
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_64x8_AVX512                 ; rows 32-39
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_64x8_AVX512                 ; rows 40-47
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_64x8_AVX512                 ; rows 48-55
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_64x8_AVX512                 ; rows 56-63
+    PROCESS_SAD_AVX512_END                  ; reduce m0 to the scalar return value
+    RET
 
 ;-----------------------------------------------------------------------------
 ; int pixel_sad_32x%1( uint16_t *, intptr_t, uint16_t *, intptr_t )


More information about the x265-devel mailing list