[x265] [PATCH] sm: avx2 code for sad_x4[8x8] for 8bpp

sumalatha at multicorewareinc.com
Tue Mar 10 11:16:47 CET 2015


# HG changeset patch
# User Sumalatha Polureddy <sumalatha at multicorewareinc.com>
# Date 1425982596 -19800
# Node ID a5273cb9926a6a1e54cd15f1a8b6767157bd29f0
# Parent  f50217c5971c36f942dc542f411ae9e33f47e402
sm: avx2 code for sad_x4[8x8] for 8bpp

Performance (speedup over C, optimized cycles, C reference cycles):

SSE3
sad_x4[  8x8]  73.26x   403.36          29550.04

AVX2
sad_x4[  8x8]  76.99x   385.05          29645.66
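
For context, sad_x4 measures one encode-side 8x8 block against four candidate
reference blocks that share a stride and writes the four costs to res[0..3].
A minimal scalar sketch of that behaviour is below; the parameter names, the
pixel typedef and the FENC_STRIDE value follow x265 conventions but are
assumptions here, not the project's exact C reference implementation.

    #include <cstdint>
    #include <cstdlib>

    typedef uint8_t pixel;             // 8bpp build
    static const int FENC_STRIDE = 64;

    // Illustrative model of the sad_x4 primitive for an 8x8 block
    static void sad_x4_8x8_ref(const pixel* fenc,
                               const pixel* fref0, const pixel* fref1,
                               const pixel* fref2, const pixel* fref3,
                               intptr_t frefstride, int32_t* res)
    {
        res[0] = res[1] = res[2] = res[3] = 0;
        for (int y = 0; y < 8; y++)
        {
            for (int x = 0; x < 8; x++)
            {
                res[0] += abs(fenc[x] - fref0[x]);
                res[1] += abs(fenc[x] - fref1[x]);
                res[2] += abs(fenc[x] - fref2[x]);
                res[3] += abs(fenc[x] - fref3[x]);
            }
            fenc  += FENC_STRIDE;      // encode block rows are FENC_STRIDE apart
            fref0 += frefstride;       // the four references share one stride
            fref1 += frefstride;
            fref2 += frefstride;
            fref3 += frefstride;
        }
    }

The AVX2 routine in the patch computes the same four totals, handling two fenc
rows per unrolled iteration and packing two references per XMM register so that
each psadbw produces two row SADs at once.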

diff -r f50217c5971c -r a5273cb9926a source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Tue Mar 10 12:50:58 2015 +0530
+++ b/source/common/x86/asm-primitives.cpp	Tue Mar 10 15:46:36 2015 +0530
@@ -1446,6 +1446,7 @@
         p.pu[LUMA_8x8].sad_x3 = x265_pixel_sad_x3_8x8_avx2;
         p.pu[LUMA_8x16].sad_x3 = x265_pixel_sad_x3_8x16_avx2;
 
+        p.pu[LUMA_8x8].sad_x4 = x265_pixel_sad_x4_8x8_avx2;
         p.pu[LUMA_16x8].sad_x4  = x265_pixel_sad_x4_16x8_avx2;
         p.pu[LUMA_16x12].sad_x4 = x265_pixel_sad_x4_16x12_avx2;
         p.pu[LUMA_16x16].sad_x4 = x265_pixel_sad_x4_16x16_avx2;
diff -r f50217c5971c -r a5273cb9926a source/common/x86/sad-a.asm
--- a/source/common/x86/sad-a.asm	Tue Mar 10 12:50:58 2015 +0530
+++ b/source/common/x86/sad-a.asm	Tue Mar 10 15:46:36 2015 +0530
@@ -3851,4 +3851,51 @@
     movd            [r5 + 8], xm1
     RET
 
+INIT_YMM avx2
+cglobal pixel_sad_x4_8x8, 7,7,5
+    xorps           m0, m0
+    xorps           m1, m1
+
+    sub             r2, r1          ; rebase on pointer r1
+    sub             r3, r1
+    sub             r4, r1
+%assign x 0
+%rep 4
+    ; row 0
+    vpbroadcastq   xm2, [r0 + 0 * FENC_STRIDE]
+    movq           xm3, [r1]
+    movhps         xm3, [r1 + r2]
+    movq           xm4, [r1 + r3]
+    movhps         xm4, [r1 + r4]
+    psadbw         xm3, xm2
+    psadbw         xm4, xm2
+    paddd          xm0, xm3
+    paddd          xm1, xm4
+    add             r1, r5
+
+    ; row 1
+    vpbroadcastq   xm2, [r0 + 1 * FENC_STRIDE]
+    movq           xm3, [r1]
+    movhps         xm3, [r1 + r2]
+    movq           xm4, [r1 + r3]
+    movhps         xm4, [r1 + r4]
+    psadbw         xm3, xm2
+    psadbw         xm4, xm2
+    paddd          xm0, xm3
+    paddd          xm1, xm4
+
+%assign x x+1
+  %if x < 4
+    add             r1, r5
+    add             r0, 2 * FENC_STRIDE
+  %endif
+%endrep
+
+    pshufd          xm0, xm0, q0020
+    pshufd          xm1, xm1, q0020
+    movq            [r6 + 0], xm0
+    movq            [r6 + 8], xm1
+    RET
+
+
 %endif
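
For reference, the new table entry is reached through x265's primitives struct
like any other sad_x4 kernel. The fragment below is only a sketch of such a
call site: the variable names are invented for illustration, and the argument
order follows my reading of the pixelcmp_x4_t typedef rather than anything in
this patch.

    int32_t costs[4];                                // one SAD per reference
    prim.pu[LUMA_8x8].sad_x4(fenc,                   // current 8x8 block, rows FENC_STRIDE apart
                             ref0, ref1, ref2, ref3, // four candidate reference blocks
                             refStride,              // common stride of the references
                             costs);                 // costs[i] = SAD(fenc, ref_i)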

