[x265] [PATCH 027 of 307] x86: AVX512 pixel_sad_x3_32xN

mythreyi at multicorewareinc.com
Sat Apr 7 04:30:25 CEST 2018


# HG changeset patch
# User Vignesh Vijayakumar
# Date 1500208511 -19800
#      Sun Jul 16 18:05:11 2017 +0530
# Node ID 5a2d94db6fcaabf532f00848a72fa337bb5e65ac
# Parent  20ca79c2c6a803e2c6caf0c1dc87fb211ea9f708
x86: AVX512 pixel_sad_x3_32xN

Size  | AVX2 performance | AVX512 performance
---------------------------------------------
32x8  |     55.55x       |     65.60x
32x16 |     54.95x       |     67.83x
32x24 |     57.95x       |     72.69x
32x32 |     64.35x       |     76.33x
32x64 |     65.02x       |     82.61x
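
For reference, sad_x3 compares one encode block against three reference candidate blocks in a single call and returns the three SAD values together. Below is a minimal C sketch of the 32xN case, assuming x265's usual pixelcmp_x3_t convention (the encode block lives in a fixed-stride buffer, the three references share frefstride); the function name is illustrative, not the x265 source:

    /* Illustrative reference model of sad_x3 for a 32xN partition; not the
     * x265 source.  The encode block (fenc) is stored at a fixed stride
     * (FENC_STRIDE is 64 in x265), the three reference blocks share
     * frefstride, and the three SADs are written to res[0..2]. */
    #include <stdint.h>
    #include <stdlib.h>

    #define FENC_STRIDE 64

    static void sad_x3_32xN_c(int N, const uint8_t* fenc,
                              const uint8_t* fref0, const uint8_t* fref1,
                              const uint8_t* fref2, intptr_t frefstride,
                              int32_t* res)
    {
        res[0] = res[1] = res[2] = 0;
        for (int y = 0; y < N; y++)
        {
            for (int x = 0; x < 32; x++)
            {
                res[0] += abs(fenc[x] - fref0[x]);
                res[1] += abs(fenc[x] - fref1[x]);
                res[2] += abs(fenc[x] - fref2[x]);
            }
            fenc  += FENC_STRIDE;
            fref0 += frefstride;
            fref1 += frefstride;
            fref2 += frefstride;
        }
    }

The AVX-512 kernels added below compute the same three sums, but process 64 bytes (two 32-pixel rows) per instruction.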

diff -r 20ca79c2c6a8 -r 5a2d94db6fca source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Fri Jul 14 11:49:50 2017 +0530
+++ b/source/common/x86/asm-primitives.cpp	Sun Jul 16 18:05:11 2017 +0530
@@ -3736,6 +3736,11 @@
         p.pu[LUMA_64x48].sad = PFX(pixel_sad_64x48_avx512);
         p.pu[LUMA_64x64].sad = PFX(pixel_sad_64x64_avx512);
 
+        p.pu[LUMA_32x8].sad_x3 = PFX(pixel_sad_x3_32x8_avx512);
+        p.pu[LUMA_32x16].sad_x3 = PFX(pixel_sad_x3_32x16_avx512);
+        p.pu[LUMA_32x24].sad_x3 = PFX(pixel_sad_x3_32x24_avx512);
+        p.pu[LUMA_32x32].sad_x3 = PFX(pixel_sad_x3_32x32_avx512);
+        p.pu[LUMA_32x64].sad_x3 = PFX(pixel_sad_x3_32x64_avx512);
         p.pu[LUMA_64x16].sad_x3 = PFX(pixel_sad_x3_64x16_avx512);
         p.pu[LUMA_64x32].sad_x3 = PFX(pixel_sad_x3_64x32_avx512);
         p.pu[LUMA_64x48].sad_x3 = PFX(pixel_sad_x3_64x48_avx512);
diff -r 20ca79c2c6a8 -r 5a2d94db6fca source/common/x86/sad-a.asm
--- a/source/common/x86/sad-a.asm	Fri Jul 14 11:49:50 2017 +0530
+++ b/source/common/x86/sad-a.asm	Sun Jul 16 18:05:11 2017 +0530
@@ -6235,6 +6235,77 @@
     paddd           m2, m3
 %endmacro
 
+%macro SAD_X3_32x8_AVX512 0
+    movu            ym3, [r0]
+    vinserti32x8    m3, [r0 + FENC_STRIDE], 1
+    movu            ym4, [r1]
+    vinserti32x8    m4, [r1 + r4], 1
+    movu            ym5, [r2]
+    vinserti32x8    m5, [r2 + r4], 1
+    movu            ym6, [r3]
+    vinserti32x8    m6, [r3 + r4], 1
+
+    psadbw          m7, m3, m4
+    paddd           m0, m7
+    psadbw          m4, m3, m5
+    paddd           m1, m4
+    psadbw          m3, m6
+    paddd           m2, m3
+
+    movu            ym3, [r0 + FENC_STRIDE * 2]
+    vinserti32x8    m3, [r0 + FENC_STRIDE * 3], 1
+    movu            ym4, [r1 + r4 * 2]
+    vinserti32x8    m4, [r1 + r6], 1
+    movu            ym5, [r2 + r4 * 2]
+    vinserti32x8    m5, [r2 + r6], 1
+    movu            ym6, [r3 + r4 * 2]
+    vinserti32x8    m6, [r3 + r6], 1
+
+    psadbw          m7, m3, m4
+    paddd           m0, m7
+    psadbw          m4, m3, m5
+    paddd           m1, m4
+    psadbw          m3, m6
+    paddd           m2, m3
+
+    add             r0, FENC_STRIDE * 4
+    lea             r1, [r1 + r4 * 4]
+    lea             r2, [r2 + r4 * 4]
+    lea             r3, [r3 + r4 * 4]
+
+    movu            ym3, [r0]
+    vinserti32x8    m3, [r0 + FENC_STRIDE], 1
+    movu            ym4, [r1]
+    vinserti32x8    m4, [r1 + r4], 1
+    movu            ym5, [r2]
+    vinserti32x8    m5, [r2 + r4], 1
+    movu            ym6, [r3]
+    vinserti32x8    m6, [r3 + r4], 1
+
+    psadbw          m7, m3, m4
+    paddd           m0, m7
+    psadbw          m4, m3, m5
+    paddd           m1, m4
+    psadbw          m3, m6
+    paddd           m2, m3
+
+    movu            ym3, [r0 + FENC_STRIDE * 2]
+    vinserti32x8    m3, [r0 + FENC_STRIDE * 3], 1
+    movu            ym4, [r1 + r4 * 2]
+    vinserti32x8    m4, [r1 + r6], 1
+    movu            ym5, [r2 + r4 * 2]
+    vinserti32x8    m5, [r2 + r6], 1
+    movu            ym6, [r3 + r4 * 2]
+    vinserti32x8    m6, [r3 + r6], 1
+
+    psadbw          m7, m3, m4
+    paddd           m0, m7
+    psadbw          m4, m3, m5
+    paddd           m1, m4
+    psadbw          m3, m6
+    paddd           m2, m3
+%endmacro
+
 %macro PIXEL_SAD_X3_END_AVX512 0
     vextracti32x8   ym3, m0, 1
     vextracti32x8   ym4, m1, 1
@@ -6382,6 +6453,126 @@
     SAD_X3_64x8_AVX512
     PIXEL_SAD_X3_END_AVX512
     RET
+
+INIT_ZMM avx512
+cglobal pixel_sad_x3_32x8, 6,7,8
+    pxor            m0, m0
+    pxor            m1, m1
+    pxor            m2, m2
+    lea             r6, [r4 * 3]
+
+    SAD_X3_32x8_AVX512
+    PIXEL_SAD_X3_END_AVX512
+    RET
+
+INIT_ZMM avx512
+cglobal pixel_sad_x3_32x16, 6,7,8
+    pxor            m0, m0
+    pxor            m1, m1
+    pxor            m2, m2
+    lea             r6, [r4 * 3]
+
+    SAD_X3_32x8_AVX512
+    add             r0, FENC_STRIDE * 4
+    lea             r1, [r1 + r4 * 4]
+    lea             r2, [r2 + r4 * 4]
+    lea             r3, [r3 + r4 * 4]
+    SAD_X3_32x8_AVX512
+    PIXEL_SAD_X3_END_AVX512
+    RET
+
+INIT_ZMM avx512
+cglobal pixel_sad_x3_32x24, 6,7,8
+    pxor            m0, m0
+    pxor            m1, m1
+    pxor            m2, m2
+    lea             r6, [r4 * 3]
+
+    SAD_X3_32x8_AVX512
+    add             r0, FENC_STRIDE * 4
+    lea             r1, [r1 + r4 * 4]
+    lea             r2, [r2 + r4 * 4]
+    lea             r3, [r3 + r4 * 4]
+    SAD_X3_32x8_AVX512
+    add             r0, FENC_STRIDE * 4
+    lea             r1, [r1 + r4 * 4]
+    lea             r2, [r2 + r4 * 4]
+    lea             r3, [r3 + r4 * 4]
+    SAD_X3_32x8_AVX512
+    PIXEL_SAD_X3_END_AVX512
+    RET
+
+INIT_ZMM avx512
+cglobal pixel_sad_x3_32x32, 6,7,8
+    pxor            m0, m0
+    pxor            m1, m1
+    pxor            m2, m2
+    lea             r6, [r4 * 3]
+
+    SAD_X3_32x8_AVX512
+    add             r0, FENC_STRIDE * 4
+    lea             r1, [r1 + r4 * 4]
+    lea             r2, [r2 + r4 * 4]
+    lea             r3, [r3 + r4 * 4]
+    SAD_X3_32x8_AVX512
+    add             r0, FENC_STRIDE * 4
+    lea             r1, [r1 + r4 * 4]
+    lea             r2, [r2 + r4 * 4]
+    lea             r3, [r3 + r4 * 4]
+    SAD_X3_32x8_AVX512
+    add             r0, FENC_STRIDE * 4
+    lea             r1, [r1 + r4 * 4]
+    lea             r2, [r2 + r4 * 4]
+    lea             r3, [r3 + r4 * 4]
+    SAD_X3_32x8_AVX512
+    PIXEL_SAD_X3_END_AVX512
+    RET
+
+INIT_ZMM avx512
+cglobal pixel_sad_x3_32x64, 6,7,8
+    pxor            m0, m0
+    pxor            m1, m1
+    pxor            m2, m2
+    lea             r6, [r4 * 3]
+
+    SAD_X3_32x8_AVX512
+    add             r0, FENC_STRIDE * 4
+    lea             r1, [r1 + r4 * 4]
+    lea             r2, [r2 + r4 * 4]
+    lea             r3, [r3 + r4 * 4]
+    SAD_X3_32x8_AVX512
+    add             r0, FENC_STRIDE * 4
+    lea             r1, [r1 + r4 * 4]
+    lea             r2, [r2 + r4 * 4]
+    lea             r3, [r3 + r4 * 4]
+    SAD_X3_32x8_AVX512
+    add             r0, FENC_STRIDE * 4
+    lea             r1, [r1 + r4 * 4]
+    lea             r2, [r2 + r4 * 4]
+    lea             r3, [r3 + r4 * 4]
+    SAD_X3_32x8_AVX512
+    add             r0, FENC_STRIDE * 4
+    lea             r1, [r1 + r4 * 4]
+    lea             r2, [r2 + r4 * 4]
+    lea             r3, [r3 + r4 * 4]
+    SAD_X3_32x8_AVX512
+    add             r0, FENC_STRIDE * 4
+    lea             r1, [r1 + r4 * 4]
+    lea             r2, [r2 + r4 * 4]
+    lea             r3, [r3 + r4 * 4]
+    SAD_X3_32x8_AVX512
+    add             r0, FENC_STRIDE * 4
+    lea             r1, [r1 + r4 * 4]
+    lea             r2, [r2 + r4 * 4]
+    lea             r3, [r3 + r4 * 4]
+    SAD_X3_32x8_AVX512
+    add             r0, FENC_STRIDE * 4
+    lea             r1, [r1 + r4 * 4]
+    lea             r2, [r2 + r4 * 4]
+    lea             r3, [r3 + r4 * 4]
+    SAD_X3_32x8_AVX512
+    PIXEL_SAD_X3_END_AVX512
+    RET
 ;------------------------------------------------------------
 ;sad_x3 avx512 code end
 ;------------------------------------------------------------
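
A note on how the new kernel works (not part of the patch): each SAD_X3_32x8_AVX512 invocation covers eight rows. Two consecutive 32-byte rows are packed into one ZMM register (movu into the ymm half, vinserti32x8 into the upper 256 bits), the same is done for each of the three references, and psadbw produces per-64-bit-lane partial sums that are accumulated into m0/m1/m2; r6 is preloaded with 3 * frefstride so the fourth row of every four-row group can be addressed with a single base plus index. The taller block sizes simply chain the macro, stepping the pointers forward four rows between invocations, and PIXEL_SAD_X3_END_AVX512 folds each accumulator down to a single 32-bit sum per reference. A rough intrinsics rendering of the two-row step, for illustration only (_mm512_inserti64x4 stands in for the vinserti32x8 used in the assembly, and the accumulation uses 64-bit adds instead of paddd; both choices are equivalent here):

    /* Illustrative AVX-512 intrinsics equivalent of the two-row step inside
     * SAD_X3_32x8_AVX512; the shipped code is the hand-written assembly
     * above.  Requires AVX512F + AVX512BW. */
    #include <immintrin.h>
    #include <stdint.h>

    static inline __m512i load_two_rows(const uint8_t* p, intptr_t stride)
    {
        /* Low 256 bits: first row; high 256 bits: second row. */
        __m512i v = _mm512_castsi256_si512(
                        _mm256_loadu_si256((const __m256i*)p));
        return _mm512_inserti64x4(
                   v, _mm256_loadu_si256((const __m256i*)(p + stride)), 1);
    }

    static inline void sad_x3_two_rows(const uint8_t* fenc, intptr_t fencStride,
                                       const uint8_t* ref0, const uint8_t* ref1,
                                       const uint8_t* ref2, intptr_t refStride,
                                       __m512i* acc0, __m512i* acc1, __m512i* acc2)
    {
        __m512i enc = load_two_rows(fenc, fencStride);

        /* psadbw: sum of absolute byte differences per 64-bit lane,
         * accumulated separately for each of the three references. */
        *acc0 = _mm512_add_epi64(*acc0,
                    _mm512_sad_epu8(enc, load_two_rows(ref0, refStride)));
        *acc1 = _mm512_add_epi64(*acc1,
                    _mm512_sad_epu8(enc, load_two_rows(ref1, refStride)));
        *acc2 = _mm512_add_epi64(*acc2,
                    _mm512_sad_epu8(enc, load_two_rows(ref2, refStride)));
    }

After all rows have been accumulated this way, summing the eight 64-bit lanes of each accumulator and storing the three totals to res reproduces the result of the C model shown earlier.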

