[x265] [PATCH 1 of 7] asm: avx2 code for sad[64x16] for 8bpp

sumalatha at multicorewareinc.com
Tue Mar 17 06:42:40 CET 2015


# HG changeset patch
# User Sumalatha Polureddy<sumalatha at multicorewareinc.com>
# Date 1426569967 -19800
# Node ID 5f185f3b799262f20d8824c3166e09dff488a666
# Parent  b9948752d5516a72eeaf824e3ee6f0feb097381c
asm: avx2 code for sad[64x16] for 8bpp

Performance (speedup over the C primitive, optimized cycles, C reference cycles):

sse3
sad[64x16]  27.74x   733.63          20349.20

avx2
sad[64x16]  50.63x   424.97          21517.36

diff -r b9948752d551 -r 5f185f3b7992 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Mon Mar 16 20:40:12 2015 -0500
+++ b/source/common/x86/asm-primitives.cpp	Tue Mar 17 10:56:07 2015 +0530
@@ -1445,6 +1445,7 @@
         p.pu[LUMA_8x8].satd   = x265_pixel_satd_8x8_avx2;
 
         p.pu[LUMA_32x32].sad = x265_pixel_sad_32x32_avx2;
+        p.pu[LUMA_64x16].sad = x265_pixel_sad_64x16_avx2;
 
         p.pu[LUMA_8x4].sad_x3 = x265_pixel_sad_x3_8x4_avx2;
         p.pu[LUMA_8x8].sad_x3 = x265_pixel_sad_x3_8x8_avx2;
diff -r b9948752d551 -r 5f185f3b7992 source/common/x86/sad-a.asm
--- a/source/common/x86/sad-a.asm	Mon Mar 16 20:40:12 2015 -0500
+++ b/source/common/x86/sad-a.asm	Tue Mar 17 10:56:07 2015 +0530
@@ -3938,4 +3938,67 @@
     movd            eax, xm0
     RET
 
+INIT_YMM avx2
+cglobal pixel_sad_64x16, 4,5,6
+    xorps           m0, m0
+    xorps           m5, m5
+    mov             r4d, 4
+.loop:
+    movu           m1, [r0]               ; first 32 of row 0 of pix0
+    movu           m2, [r2]               ; first 32 of row 0 of pix1
+    movu           m3, [r0 + 32]          ; second 32 of row 0 of pix0
+    movu           m4, [r2 + 32]          ; second 32 of row 0 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m5, m3
+
+    movu           m1, [r0 + r1]          ; first 32 of row 1 of pix0
+    movu           m2, [r2 + r3]          ; first 32 of row 1 of pix1
+    movu           m3, [r0 + 32 + r1]     ; second 32 of row 1 of pix0
+    movu           m4, [r2 + 32 + r3]     ; second 32 of row 1 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m5, m3
+
+    lea            r2, [r2 + 2 * r3]
+    lea            r0, [r0 + 2 * r1]
+
+    movu           m1, [r0]               ; first 32 of row 2 of pix0
+    movu           m2, [r2]               ; first 32 of row 2 of pix1
+    movu           m3, [r0 + 32]          ; second 32 of row 2 of pix0
+    movu           m4, [r2 + 32]          ; second 32 of row 2 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m5, m3
+
+    movu           m1, [r0 + r1]          ; first 32 of row 3 of pix0
+    movu           m2, [r2 + r3]          ; first 32 of row 3 of pix1
+    movu           m3, [r0 + 32 + r1]     ; second 32 of row 3 of pix0
+    movu           m4, [r2 + 32 + r3]     ; second 32 of row 3 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m5, m3
+
+    lea            r2, [r2 + 2 * r3]
+    lea            r0, [r0 + 2 * r1]
+
+    dec         r4d
+    jnz         .loop
+
+    paddd          m0, m5
+    vextracti128   xm1, m0, 1
+    paddd          xm0, xm1
+    pshufd         xm1, xm0, 2
+    paddd          xm0, xm1
+    movd            eax, xm0
+    RET
+
 %endif
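
For readers less familiar with the kernel: psadbw produces one 64-bit partial
sum per 8-byte lane, so the loop keeps two ymm accumulators (m0/m5), folds four
rows per iteration (r4d counts 4 iterations for the 16 rows), and the final
vextracti128/pshufd/paddd sequence collapses the four lanes into eax. A minimal
scalar sketch of the value being computed (the function name and argument types
below are illustrative only, not part of this patch):

#include <stdint.h>
#include <stdlib.h>

/* Illustrative C reference: sum of absolute differences over a
 * 64x16 block of 8-bit pixels, walking both planes by their strides. */
static int sad_64x16_c(const uint8_t *pix0, intptr_t stride0,
                       const uint8_t *pix1, intptr_t stride1)
{
    int sum = 0;
    for (int y = 0; y < 16; y++)
    {
        for (int x = 0; x < 64; x++)
            sum += abs(pix0[x] - pix1[x]);
        pix0 += stride0;
        pix1 += stride1;
    }
    return sum;
}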

