[x265] [PATCH 039 of 307] x86: AVX512 sad_w32 and sad_w64 cleanup

mythreyi at multicorewareinc.com
Sat Apr 7 04:30:37 CEST 2018


# HG changeset patch
# User Vignesh Vijayakumar
# Date 1500621502 -19800
#      Fri Jul 21 12:48:22 2017 +0530
# Node ID be860e68659a37dae543956a65a4eb167f8b5504
# Parent  49123506b563fd44378e856e6833c77812d0349e
x86: AVX512 sad_w32 and sad_w64 cleanup
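
This replaces the %rep-unrolled PIXEL_SAD_W64_AVX512 and
PIXEL_SAD_W32_AVX512 macro bodies with explicit 8-row building blocks
(PROCESS_SAD_64x8_AVX512, PROCESS_SAD_32x8_AVX512) plus a shared
horizontal-reduction tail (PROCESS_SAD_AVX512_END), and emits one cglobal
per block size. The operation itself is unchanged: each kernel computes
the plain sum of absolute differences over a WxH block. For reference, a
minimal scalar sketch of that operation (an illustration only; sad_ref is
a hypothetical name, not part of x265):

    #include <stdint.h>
    #include <stdlib.h>

    /* Scalar SAD over a width x height block, matching the shape of
     * int pixel_sad_WxH(uint8_t *, intptr_t, uint8_t *, intptr_t). */
    static int sad_ref(const uint8_t *pix0, intptr_t stride0,
                       const uint8_t *pix1, intptr_t stride1,
                       int width, int height)
    {
        int sum = 0;
        for (int y = 0; y < height; y++)
        {
            for (int x = 0; x < width; x++)
                sum += abs(pix0[x] - pix1[x]);
            pix0 += stride0;
            pix1 += stride1;
        }
        return sum;
    }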

diff -r 49123506b563 -r be860e68659a source/common/x86/sad-a.asm
--- a/source/common/x86/sad-a.asm	Fri Jul 21 14:32:12 2017 +0530
+++ b/source/common/x86/sad-a.asm	Fri Jul 21 12:48:22 2017 +0530
@@ -7565,29 +7565,79 @@
     movd            eax, xm0
     RET
 
-;-----------------------------------------------------------------------------
-; int pixel_sad_64x%1( uint8_t *, intptr_t, uint8_t *, intptr_t )
-;-----------------------------------------------------------------------------
-%macro PIXEL_SAD_W64_AVX512 1
-INIT_ZMM avx512
-cglobal pixel_sad_64x%1, 4,5,6
-    xorps           m0, m0
-    xorps           m5, m5
-
-%rep %1/2
-    movu           m1, [r0]               ; first 64 of row 0 of pix0
-    movu           m2, [r2]               ; first 64 of row 0 of pix1
-    movu           m3, [r0 + r1]          ; first 64 of row 1 of pix0
-    movu           m4, [r2 + r3]          ; first 64 of row 1 of pix1
+%macro PROCESS_SAD_64x8_AVX512 0
+    movu           m1, [r0]
+    movu           m2, [r2]
+    movu           m3, [r0 + r1]
+    movu           m4, [r2 + r3]
     psadbw         m1, m2
     psadbw         m3, m4
     paddd          m0, m1
     paddd          m5, m3
-    lea            r2, [r2 + 2 * r3]
-    lea            r0, [r0 + 2 * r1]
-%endrep
-
-    paddd          m0, m5
+    movu           m1, [r0 + 2 * r1]
+    movu           m2, [r2 + 2 * r3]
+    movu           m3, [r0 + r5]
+    movu           m4, [r2 + r6]
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m5, m3
+
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+
+    movu           m1, [r0]
+    movu           m2, [r2]
+    movu           m3, [r0 + r1]
+    movu           m4, [r2 + r3]
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m5, m3
+    movu           m1, [r0 + 2 * r1]
+    movu           m2, [r2 + 2 * r3]
+    movu           m3, [r0 + r5]
+    movu           m4, [r2 + r6]
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m5, m3
+%endmacro
+
+%macro PROCESS_SAD_32x8_AVX512 0
+    movu           ym1, [r0]
+    movu           ym2, [r2]
+    vinserti32x8    m1, [r0 + r1], 1
+    vinserti32x8    m2, [r2 + r3], 1
+    movu           ym3, [r0 + 2 * r1]
+    movu           ym4, [r2 + 2 * r3]
+    vinserti32x8    m3, [r0 + r5], 1
+    vinserti32x8    m4, [r2 + r6], 1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m0, m3
+
+    lea            r2,     [r2 + 4 * r3]
+    lea            r0,     [r0 + 4 * r1]
+
+    movu           ym1, [r0]
+    movu           ym2, [r2]
+    vinserti32x8    m1, [r0 + r1], 1
+    vinserti32x8    m2, [r2 + r3], 1
+    movu           ym3, [r0 + 2 * r1]
+    movu           ym4, [r2 + 2 * r3]
+    vinserti32x8    m3, [r0 + r5], 1
+    vinserti32x8    m4, [r2 + r6], 1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m0, m3
+%endmacro
+
+%macro PROCESS_SAD_AVX512_END 0
     vextracti32x8  ym1, m0, 1
     paddd          ym0, ym1
     vextracti64x2  xm1, m0, 1
@@ -7595,53 +7645,195 @@
     pshufd         xm1, xm0, 2
     paddd          xm0, xm1
     movd           eax, xm0
+%endmacro
+;-----------------------------------------------------------------------------
+; int pixel_sad_64xN( uint8_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+INIT_ZMM avx512
+cglobal pixel_sad_64x16, 4,7,6
+    xorps           m0, m0
+    xorps           m5, m5
+    lea             r5, [3 * r1]
+    lea             r6, [3 * r3]
+
+    PROCESS_SAD_64x8_AVX512
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_64x8_AVX512
+    paddd          m0, m5
+    PROCESS_SAD_AVX512_END
     RET
-%endmacro
-
-PIXEL_SAD_W64_AVX512 16
-PIXEL_SAD_W64_AVX512 32
-PIXEL_SAD_W64_AVX512 48
-PIXEL_SAD_W64_AVX512 64
-
-%macro PIXEL_SAD_W32_AVX512 1
+
 INIT_ZMM avx512
-cglobal pixel_sad_32x%1, 4,7,5
+cglobal pixel_sad_64x32, 4,7,6
+    xorps           m0, m0
+    xorps           m5, m5
+    lea             r5, [3 * r1]
+    lea             r6, [3 * r3]
+
+    PROCESS_SAD_64x8_AVX512
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_64x8_AVX512
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_64x8_AVX512
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_64x8_AVX512
+    paddd          m0, m5
+    PROCESS_SAD_AVX512_END
+    RET
+
+INIT_ZMM avx512
+cglobal pixel_sad_64x48, 4,7,6
+    xorps           m0, m0
+    xorps           m5, m5
+    lea             r5, [3 * r1]
+    lea             r6, [3 * r3]
+
+    PROCESS_SAD_64x8_AVX512
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_64x8_AVX512
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_64x8_AVX512
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_64x8_AVX512
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_64x8_AVX512
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_64x8_AVX512
+    paddd          m0, m5
+    PROCESS_SAD_AVX512_END
+    RET
+
+INIT_ZMM avx512
+cglobal pixel_sad_64x64, 4,7,6
+    xorps           m0, m0
+    xorps           m5, m5
+    lea             r5, [3 * r1]
+    lea             r6, [3 * r3]
+
+    PROCESS_SAD_64x8_AVX512
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_64x8_AVX512
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_64x8_AVX512
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_64x8_AVX512
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_64x8_AVX512
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_64x8_AVX512
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_64x8_AVX512
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_64x8_AVX512
+    paddd          m0, m5
+    PROCESS_SAD_AVX512_END
+    RET
+
+;-----------------------------------------------------------------------------
+; int pixel_sad_32xN( uint8_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+INIT_ZMM avx512
+cglobal pixel_sad_32x8, 4,7,5
     xorps           m0, m0
     lea             r5, [r1 * 3]
     lea             r6, [r3 * 3]
 
-%rep %1/4
-    movu           ym1, [r0]               ; row 0 of pix0
-    movu           ym2, [r2]               ; row 0 of pix1
-    vinserti32x8    m1, [r0 + r1], 1       ; row 1 of pix0
-    vinserti32x8    m2, [r2 + r3], 1       ; row 1 of pix1
-    movu           ym3, [r0 + 2 * r1]      ; row 2 of pix0
-    movu           ym4, [r2 + 2 * r3]      ; row 2 of pix1
-    vinserti32x8    m3, [r0 + r5], 1       ; row 3 of pix0
-    vinserti32x8    m4, [r2 + r6], 1       ; row 3 of pix1
-
-    psadbw         m1, m2
-    psadbw         m3, m4
-    paddd          m0, m1
-    paddd          m0, m3
-
-    lea            r2,     [r2 + 4 * r3]
-    lea            r0,     [r0 + 4 * r1]
-%endrep
-
-    vextracti32x8  ym1, m0, 1
-    paddd          ym0, ym1
-    vextracti64x2  xm1, m0, 1
-    paddd          xm0, xm1
-    pshufd         xm1, xm0, 2
-    paddd          xm0, xm1
-    movd           eax, xm0
+    PROCESS_SAD_32x8_AVX512
+    PROCESS_SAD_AVX512_END
     RET
-%endmacro
-
-PIXEL_SAD_W32_AVX512 8
-PIXEL_SAD_W32_AVX512 16
-PIXEL_SAD_W32_AVX512 24
-PIXEL_SAD_W32_AVX512 32
-PIXEL_SAD_W32_AVX512 64
+
+INIT_ZMM avx512
+cglobal pixel_sad_32x16, 4,7,5
+    xorps           m0, m0
+    lea             r5, [r1 * 3]
+    lea             r6, [r3 * 3]
+
+    PROCESS_SAD_32x8_AVX512
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_32x8_AVX512
+    PROCESS_SAD_AVX512_END
+    RET
+
+INIT_ZMM avx512
+cglobal pixel_sad_32x24, 4,7,5
+    xorps           m0, m0
+    lea             r5, [r1 * 3]
+    lea             r6, [r3 * 3]
+
+    PROCESS_SAD_32x8_AVX512
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_32x8_AVX512
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_32x8_AVX512
+    PROCESS_SAD_AVX512_END
+    RET
+
+INIT_ZMM avx512
+cglobal pixel_sad_32x32, 4,7,5
+    xorps           m0, m0
+    lea             r5, [r1 * 3]
+    lea             r6, [r3 * 3]
+
+    PROCESS_SAD_32x8_AVX512
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_32x8_AVX512
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_32x8_AVX512
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_32x8_AVX512
+    PROCESS_SAD_AVX512_END
+    RET
+
+INIT_ZMM avx512
+cglobal pixel_sad_32x64, 4,7,5
+    xorps           m0, m0
+    lea             r5, [r1 * 3]
+    lea             r6, [r3 * 3]
+
+    PROCESS_SAD_32x8_AVX512
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_32x8_AVX512
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_32x8_AVX512
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_32x8_AVX512
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_32x8_AVX512
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_32x8_AVX512
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_32x8_AVX512
+    lea            r2, [r2 + 4 * r3]
+    lea            r0, [r0 + 4 * r1]
+    PROCESS_SAD_32x8_AVX512
+    PROCESS_SAD_AVX512_END
+    RET
 %endif
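
A note on the shared tail for readers mapping the asm: PROCESS_SAD_AVX512_END
folds the eight 64-bit psadbw partials held in the zmm accumulator down to a
single dword (add the high 256 bits onto the low 256, add the high 128 bits
onto the low 128, then fold the upper 64-bit lane onto the lower one). A
rough C-intrinsics equivalent (an illustration only; reduce_sad_zmm is a
hypothetical name, not part of x265):

    #include <immintrin.h>

    static int reduce_sad_zmm(__m512i acc)
    {
        /* vextracti32x8 + paddd: high 256 bits onto low 256 bits */
        __m256i sum256 = _mm256_add_epi64(_mm512_castsi512_si256(acc),
                                          _mm512_extracti64x4_epi64(acc, 1));
        /* vextracti64x2 + paddd: high 128 bits onto low 128 bits */
        __m128i sum128 = _mm_add_epi64(_mm256_castsi256_si128(sum256),
                                       _mm256_extracti128_si256(sum256, 1));
        /* pshufd + paddd: upper 64-bit lane onto the lower one */
        sum128 = _mm_add_epi64(sum128, _mm_unpackhi_epi64(sum128, sum128));
        return _mm_cvtsi128_si32(sum128);  /* movd eax, xm0 */
    }

The asm accumulates with dword adds (paddd); since each psadbw partial stays
far below 2^32 for these block sizes, the 64-bit adds above are equivalent in
effect.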

