[x265] [PATCH] asm: avx2 code for sad_x3[8x8] for 8bpp - 52x

sumalatha at multicorewareinc.com sumalatha at multicorewareinc.com
Mon Mar 9 09:47:59 CET 2015


# HG changeset patch
# User Sumalatha Polureddy<sumalatha at multicorewareinc.com>
# Date 1425889237 -19800
# Node ID b52c021d220ec4a88d798a794b57cbfc72678025
# Parent  043c2418864b0a3ada6f597e6def6ead73d90b5f
asm: avx2 code for sad_x3[8x8] for 8bpp - 52x

sad_x3[  8x8]  52.83x   402.47          21264.02

diff -r 043c2418864b -r b52c021d220e source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Fri Mar 06 13:15:55 2015 -0600
+++ b/source/common/x86/asm-primitives.cpp	Mon Mar 09 13:50:37 2015 +0530
@@ -1442,6 +1442,8 @@
         p.pu[LUMA_8x16].satd  = x265_pixel_satd_8x16_avx2;
         p.pu[LUMA_8x8].satd   = x265_pixel_satd_8x8_avx2;
 
+        p.pu[LUMA_8x8].sad_x3 = x265_pixel_sad_x3_8x8_avx2;
+
         p.pu[LUMA_16x8].sad_x4  = x265_pixel_sad_x4_16x8_avx2;
         p.pu[LUMA_16x12].sad_x4 = x265_pixel_sad_x4_16x12_avx2;
         p.pu[LUMA_16x16].sad_x4 = x265_pixel_sad_x4_16x16_avx2;
diff -r 043c2418864b -r b52c021d220e source/common/x86/sad-a.asm
--- a/source/common/x86/sad-a.asm	Fri Mar 06 13:15:55 2015 -0600
+++ b/source/common/x86/sad-a.asm	Mon Mar 09 13:50:37 2015 +0530
@@ -3710,3 +3710,101 @@
 SADX34_CACHELINE_FUNC 16, 16, 64, sse2, ssse3, ssse3
 SADX34_CACHELINE_FUNC 16,  8, 64, sse2, ssse3, ssse3
 
+%if HIGH_BIT_DEPTH==0
+INIT_YMM avx2
+cglobal pixel_sad_x3_8x8, 6, 7, 7, pix1, pix2, pix3, pix4, frefstride, res
+    pxor           xm0, xm0                                       ; m0/m1/m2 = SAD accumulators for pix2/pix3/pix4 (VEX pxor zeroes full ymm)
+    mova           xm1, xm0
+    mova           xm2, xm0
+
+    movq           xm3, [r0]                                      ; gather 4 rows of pix1, 8 bytes each
+    movhps         xm3, [r0 + FENC_STRIDE]
+    movq           xm4, [r0 + 2 * FENC_STRIDE]
+    movhps         xm4, [r0 + FENC_STRIDE + 2 * FENC_STRIDE]     ; row 3: FENC_STRIDE + 2*FENC_STRIDE = 3*FENC_STRIDE
+    vinserti128    m3, m3, xm4, 1                                 ; pix1 rows 0-3 packed across both lanes
+
+    movq           xm4, [r1]                                      ; gather 4 rows of pix2 (stride = r4 = frefstride)
+    movhps         xm4, [r1 + r4]
+    lea            r1,  [r1 + 2 * r4]                             ; r1 now points at row 2
+    movq           xm5, [r1]
+    movhps         xm5, [r1 + r4]
+    vinserti128    m4, m4, xm5, 1                                 ; pix2 rows 0-3
+    psadbw         m6, m3, m4                                     ; per-qword SADs vs pix1
+    paddq          m0, m6                                         ; res[0]
+
+    movq           xm4, [r2]                                      ; gather 4 rows of pix3
+    movhps         xm4, [r2 + r4]
+    lea            r2,  [r2 + 2 * r4]                             ; r2 now points at row 2
+    movq           xm5, [r2]
+    movhps         xm5, [r2 + r4]
+    vinserti128    m4, m4, xm5, 1                                 ; pix3 rows 0-3
+    psadbw         m6, m3, m4
+    paddq          m1, m6                                         ; res[1]
+
+    movq           xm4, [r3]                                      ; gather 4 rows of pix4
+    movhps         xm4, [r3 + r4]
+    lea            r3,  [r3 + 2 * r4]                             ; r3 now points at row 2
+    movq           xm5, [r3]
+    movhps         xm5, [r3 + r4]
+    vinserti128    m4, m4, xm5, 1                                 ; pix4 rows 0-3
+    psadbw         m3, m4                                         ; pix1 no longer needed; clobber m3 in place
+    paddq          m2, m3                                         ; res[2]
+
+    lea            r0,  [r0 + 4 * FENC_STRIDE]                    ; advance to row 4; r1/r2/r3 already moved 2 rows above
+    lea            r1,  [r1 + 2* r4]
+    lea            r2,  [r2 + 2 * r4]
+    lea            r3,  [r3 + 2 * r4]
+
+    movq           xm3, [r0]                                      ; second half: pix1 rows 4-7
+    movhps         xm3, [r0 + FENC_STRIDE]
+    movq           xm4, [r0 + 2 * FENC_STRIDE]
+    movhps         xm4, [r0 + FENC_STRIDE + 2 * FENC_STRIDE]     ; row 7 = base + 3*FENC_STRIDE
+    vinserti128    m3, m3, xm4, 1                                 ; pix1 rows 4-7
+
+    movq           xm4, [r1]
+    movhps         xm4, [r1 + r4]
+    lea            r1,  [r1 + 2 * r4]
+    movq           xm5, [r1]
+    movhps         xm5, [r1 + r4]
+    vinserti128    m4, m4, xm5, 1                                 ; pix2 rows 4-7
+    psadbw         m6, m3, m4
+    paddq          m0, m6                                         ; res[0]
+
+    movq           xm4, [r2]
+    movhps         xm4, [r2 + r4]
+    lea            r2,  [r2 + 2 * r4]
+    movq           xm5, [r2]
+    movhps         xm5, [r2 + r4]
+    vinserti128    m4, m4, xm5, 1                                 ; pix3 rows 4-7
+    psadbw         m6, m3, m4
+    paddq          m1, m6                                         ; res[1]
+
+    movq           xm4, [r3]
+    movhps         xm4, [r3 + r4]
+    lea            r3,  [r3 + 2 * r4]
+    movq           xm5, [r3]
+    movhps         xm5, [r3 + r4]
+    vinserti128    m4, m4, xm5, 1                                 ; pix4 rows 4-7
+    psadbw         m3, m4
+    paddq          m2, m3                                         ; res[2]
+
+    vextracti128   xm4, m0, 1                                     ; horizontal reduce m0: fold high lane into low
+    paddd          xm0, xm4
+    pshufd         xm4, xm0, 2                                    ; bring qword 1's dword down to slot 0
+    paddd          xm0,xm4
+    movd           [r5], xm0                                      ; res[0] = SAD(pix1, pix2)
+
+    vextracti128   xm4, m1, 1                                     ; same reduction for m1
+    paddd          xm1, xm4
+    pshufd         xm4, xm1, 2
+    paddd          xm1,xm4
+    movd           [r5 + 4], xm1                                  ; res[1] = SAD(pix1, pix3)
+
+    vextracti128   xm4, m2, 1                                     ; same reduction for m2
+    paddd          xm2, xm4
+    pshufd         xm4, xm2, 2
+    paddd          xm2,xm4
+    movd           [r5 + 8], xm2                                  ; res[2] = SAD(pix1, pix4)
+
+RET
+%endif


More information about the x265-devel mailing list