[x265] [PATCH 4 of 7] asm: avx2 code for sad[64x64] for 8bpp

sumalatha at multicorewareinc.com sumalatha at multicorewareinc.com
Tue Mar 17 06:42:43 CET 2015


# HG changeset patch
# User Sumalatha Polureddy<sumalatha at multicorewareinc.com>
# Date 1426570425 -19800
# Node ID 5f1176737636b1b6f51ff424f99bcadcdc61d235
# Parent  3ffa0f5c3e9dcc46d872594c311c66f472d3b7fe
asm: avx2 code for sad[64x64] for 8bpp

Performance before (sse3) and after (avx2):

sse3
sad[64x64]  35.07x   2433.85         85363.52

avx2
sad[64x64]  59.68x   1430.24         85353.53
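
For context, the primitive being optimized is a plain sum of absolute differences
over a 64x64 block of 8-bit pixels. A minimal scalar sketch of that computation is
below (the function name and exact signature are illustrative only, not the actual
x265 C reference); the AVX2 kernel in the diff computes the same value by treating
each 64-byte row as two 32-byte halves fed to psadbw, keeping two ymm accumulators
(m0 and m5), and folding them into a single 32-bit result in the epilogue.

    /* Illustrative scalar sketch (hypothetical name/signature, not the actual
     * x265 C primitive): SAD of two 64x64 blocks of 8-bit pixels. */
    #include <cstdint>
    #include <cstdlib>

    static int sad_64x64_ref(const uint8_t* pix0, intptr_t stride0,
                             const uint8_t* pix1, intptr_t stride1)
    {
        int sum = 0;
        for (int y = 0; y < 64; y++)
        {
            for (int x = 0; x < 64; x++)
                sum += abs(pix0[x] - pix1[x]);
            pix0 += stride0;   /* advance one row in each plane */
            pix1 += stride1;
        }
        return sum;
    }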

diff -r 3ffa0f5c3e9d -r 5f1176737636 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Tue Mar 17 11:00:07 2015 +0530
+++ b/source/common/x86/asm-primitives.cpp	Tue Mar 17 11:03:45 2015 +0530
@@ -1448,6 +1448,7 @@
         p.pu[LUMA_64x16].sad = x265_pixel_sad_64x16_avx2;
         p.pu[LUMA_64x32].sad = x265_pixel_sad_64x32_avx2;
         p.pu[LUMA_64x48].sad = x265_pixel_sad_64x48_avx2;
+        p.pu[LUMA_64x64].sad = x265_pixel_sad_64x64_avx2;
 
         p.pu[LUMA_8x4].sad_x3 = x265_pixel_sad_x3_8x4_avx2;
         p.pu[LUMA_8x8].sad_x3 = x265_pixel_sad_x3_8x8_avx2;
diff -r 3ffa0f5c3e9d -r 5f1176737636 source/common/x86/sad-a.asm
--- a/source/common/x86/sad-a.asm	Tue Mar 17 11:00:07 2015 +0530
+++ b/source/common/x86/sad-a.asm	Tue Mar 17 11:03:45 2015 +0530
@@ -4081,4 +4081,113 @@
     movd            eax, xm0
     RET
 
+INIT_YMM avx2
+cglobal pixel_sad_64x64, 4,5,6
+    xorps           m0, m0                ; accumulator for the first 32 bytes of each row
+    xorps           m5, m5                ; accumulator for the second 32 bytes of each row
+    mov             r4d, 8                ; 8 iterations x 8 rows per iteration = 64 rows
+.loop:
+    movu           m1, [r0]               ; first 32 of row 0 of pix0
+    movu           m2, [r2]               ; first 32 of row 0 of pix1
+    movu           m3, [r0 + 32]          ; second 32 of row 0 of pix0
+    movu           m4, [r2 + 32]          ; second 32 of row 0 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m5, m3
+
+    movu           m1, [r0 + r1]          ; first 32 of row 1 of pix0
+    movu           m2, [r2 + r3]          ; first 32 of row 1 of pix1
+    movu           m3, [r0 + 32 + r1]     ; second 32 of row 1 of pix0
+    movu           m4, [r2 + 32 + r3]     ; second 32 of row 1 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m5, m3
+
+    lea            r2, [r2 + 2 * r3]
+    lea            r0, [r0 + 2 * r1]
+
+    movu           m1, [r0]               ; first 32 of row 2 of pix0
+    movu           m2, [r2]               ; first 32 of row 2 of pix1
+    movu           m3, [r0 + 32]          ; second 32 of row 2 of pix0
+    movu           m4, [r2 + 32]          ; second 32 of row 2 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m5, m3
+
+    movu           m1, [r0 + r1]          ; first 32 of row 3 of pix0
+    movu           m2, [r2 + r3]          ; first 32 of row 3 of pix1
+    movu           m3, [r0 + 32 + r1]     ; second 32 of row 3 of pix0
+    movu           m4, [r2 + 32 + r3]     ; second 32 of row 3 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m5, m3
+
+    lea            r2, [r2 + 2 * r3]
+    lea            r0, [r0 + 2 * r1]
+
+    movu           m1, [r0]               ; first 32 of row 4 of pix0
+    movu           m2, [r2]               ; first 32 of row 4 of pix1
+    movu           m3, [r0 + 32]          ; second 32 of row 4 of pix0
+    movu           m4, [r2 + 32]          ; second 32 of row 4 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m5, m3
+
+    movu           m1, [r0 + r1]          ; first 32 of row 5 of pix0
+    movu           m2, [r2 + r3]          ; first 32 of row 5 of pix1
+    movu           m3, [r0 + 32 + r1]     ; second 32 of row 5 of pix0
+    movu           m4, [r2 + 32 + r3]     ; second 32 of row 5 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m5, m3
+
+    lea            r2, [r2 + 2 * r3]
+    lea            r0, [r0 + 2 * r1]
+
+    movu           m1, [r0]               ; first 32 of row 6 of pix0
+    movu           m2, [r2]               ; first 32 of row 6 of pix1
+    movu           m3, [r0 + 32]          ; second 32 of row 6 of pix0
+    movu           m4, [r2 + 32]          ; second 32 of row 6 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m5, m3
+
+    movu           m1, [r0 + r1]          ; first 32 of row 7 of pix0
+    movu           m2, [r2 + r3]          ; first 32 of row 7 of pix1
+    movu           m3, [r0 + 32 + r1]     ; second 32 of row 7 of pix0
+    movu           m4, [r2 + 32 + r3]     ; second 32 of row 7 of pix1
+
+    psadbw         m1, m2
+    psadbw         m3, m4
+    paddd          m0, m1
+    paddd          m5, m3
+
+    lea            r2, [r2 + 2 * r3]
+    lea            r0, [r0 + 2 * r1]
+
+    dec         r4d
+    jnz         .loop
+
+    paddd          m0, m5                 ; merge the two accumulators
+    vextracti128   xm1, m0, 1             ; add high 128 bits to low 128 bits
+    paddd          xm0, xm1
+    pshufd         xm1, xm0, 2            ; add the two remaining qword sums
+    paddd          xm0, xm1
+    movd            eax, xm0              ; final 32-bit SAD
+    RET
+
 %endif
