[x265] [PATCH] asm: avx2 code for sad_x4[8x4] for 8bpp

sumalatha at multicorewareinc.com
Tue Mar 10 12:13:52 CET 2015


# HG changeset patch
# User Sumalatha Polureddy<sumalatha at multicorewareinc.com>
# Date 1425986024 -19800
# Node ID 699ac7bb215f027d926cb55d4a7a734196277293
# Parent  a5273cb9926a6a1e54cd15f1a8b6767157bd29f0
asm: avx2 code for sad_x4[8x4] for 8bpp

Performance (x265 testbench; columns: speedup over C, optimized time, C time)

SSE3 (previous)
sad_x4[  8x4]  53.87x   281.63          15170.72

AVX2 (this patch)
sad_x4[  8x4]  56.76x   260.06          14761.21
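
For reference, sad_x4 evaluates one encoder block against four candidate
reference blocks in a single call and writes the four SAD sums to a result
array. The C sketch below shows the 8x4 case; the function name, signature and
the fixed FENC_STRIDE value follow the usual x265 primitive convention and are
assumptions for illustration, not part of this patch.

/* Reference sketch (assumed names): SAD of one 8x4 fenc block against
 * four candidate reference blocks.  FENC_STRIDE is the fixed stride of
 * the encoder's pixel buffer (assumed 64 as in x265). */
#include <stdint.h>
#include <stdlib.h>

#define FENC_STRIDE 64

static void sad_x4_8x4_ref(const uint8_t* fenc,
                           const uint8_t* fref0, const uint8_t* fref1,
                           const uint8_t* fref2, const uint8_t* fref3,
                           intptr_t frefstride, int32_t* res)
{
    const uint8_t* fref[4] = { fref0, fref1, fref2, fref3 };

    for (int i = 0; i < 4; i++)             // four candidate references
    {
        res[i] = 0;
        for (int y = 0; y < 4; y++)         // 4 rows ...
            for (int x = 0; x < 8; x++)     // ... of 8 pixels each
                res[i] += abs(fenc[y * FENC_STRIDE + x] - fref[i][y * frefstride + x]);
    }
}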

diff -r a5273cb9926a -r 699ac7bb215f source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Tue Mar 10 15:46:36 2015 +0530
+++ b/source/common/x86/asm-primitives.cpp	Tue Mar 10 16:43:44 2015 +0530
@@ -1447,6 +1447,7 @@
         p.pu[LUMA_8x16].sad_x3 = x265_pixel_sad_x3_8x16_avx2;
 
         p.pu[LUMA_8x8].sad_x4 = x265_pixel_sad_x4_8x8_avx2;
+        p.pu[LUMA_8x4].sad_x4 = x265_pixel_sad_x4_8x4_avx2;
         p.pu[LUMA_16x8].sad_x4  = x265_pixel_sad_x4_16x8_avx2;
         p.pu[LUMA_16x12].sad_x4 = x265_pixel_sad_x4_16x12_avx2;
         p.pu[LUMA_16x16].sad_x4 = x265_pixel_sad_x4_16x16_avx2;
diff -r a5273cb9926a -r 699ac7bb215f source/common/x86/sad-a.asm
--- a/source/common/x86/sad-a.asm	Tue Mar 10 15:46:36 2015 +0530
+++ b/source/common/x86/sad-a.asm	Tue Mar 10 16:43:44 2015 +0530
@@ -3897,5 +3897,66 @@
     movq            [r6 + 8], xm1
     RET
 
+INIT_YMM avx2
+cglobal pixel_sad_x4_8x4, 7,7,5     ; (fenc, fref0, fref1, fref2, fref3, frefstride, res)
+    xorps           m0, m0          ; SAD accumulator for fref0/fref1
+    xorps           m1, m1          ; SAD accumulator for fref2/fref3
+
+    sub             r2, r1          ; turn r2-r4 into offsets relative to r1 (fref0)
+    sub             r3, r1
+    sub             r4, r1
+
+    ; row 0
+    vpbroadcastq   xm2, [r0 + 0 * FENC_STRIDE]  ; broadcast 8 fenc pixels to both qwords
+    movq           xm3, [r1]                    ; fref0 row in low qword
+    movhps         xm3, [r1 + r2]               ; fref1 row in high qword
+    movq           xm4, [r1 + r3]               ; fref2 row in low qword
+    movhps         xm4, [r1 + r4]               ; fref3 row in high qword
+    psadbw         xm3, xm2                     ; two 8-pixel SADs per psadbw
+    psadbw         xm4, xm2
+    paddd          xm0, xm3
+    paddd          xm1, xm4
+    add             r1, r5
+
+    ; row 1
+    vpbroadcastq   xm2, [r0 + 1 * FENC_STRIDE]
+    movq           xm3, [r1]
+    movhps         xm3, [r1 + r2]
+    movq           xm4, [r1 + r3]
+    movhps         xm4, [r1 + r4]
+    psadbw         xm3, xm2
+    psadbw         xm4, xm2
+    paddd          xm0, xm3
+    paddd          xm1, xm4
+    add             r1, r5
+
+    ; row 2
+    vpbroadcastq   xm2, [r0 + 2 * FENC_STRIDE]
+    movq           xm3, [r1]
+    movhps         xm3, [r1 + r2]
+    movq           xm4, [r1 + r3]
+    movhps         xm4, [r1 + r4]
+    psadbw         xm3, xm2
+    psadbw         xm4, xm2
+    paddd          xm0, xm3
+    paddd          xm1, xm4
+    add             r1, r5
+
+    ; row 3
+    vpbroadcastq   xm2, [r0 + 3 * FENC_STRIDE]
+    movq           xm3, [r1]
+    movhps         xm3, [r1 + r2]
+    movq           xm4, [r1 + r3]
+    movhps         xm4, [r1 + r4]
+    psadbw         xm3, xm2
+    psadbw         xm4, xm2
+    paddd          xm0, xm3
+    paddd          xm1, xm4
+
+    pshufd          xm0, xm0, q0020 ; pack the two 64-bit SADs into adjacent dwords
+    pshufd          xm1, xm1, q0020
+    movq            [r6 + 0], xm0   ; res[0], res[1]
+    movq            [r6 + 8], xm1   ; res[2], res[3]
+    RET
 
 %endif
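
The kernel gets its speedup by pairing references: each 8-byte fenc row is
broadcast to both 64-bit lanes of an XMM register (vpbroadcastq), two reference
rows are packed into one register (movq into the low qword, movhps into the
high), and a single psadbw then produces both SADs at once; the final
pshufd/movq pair packs the two 64-bit partial sums of each accumulator into two
32-bit results. A rough SSE2 intrinsics sketch of one row step is below; the
helper name is hypothetical and _mm_set1_epi64x merely stands in for the asm's
vpbroadcastq.

/* Hypothetical illustration (not the patch itself) of one row step:
 * pack two 8-byte reference rows into one XMM register and SAD both
 * against the broadcast fenc row with a single psadbw. */
#include <emmintrin.h>
#include <stdint.h>
#include <string.h>

static inline void sad_x4_row(const uint8_t* fenc, const uint8_t* ref0,
                              const uint8_t* ref1, const uint8_t* ref2,
                              const uint8_t* ref3, __m128i* acc01, __m128i* acc23)
{
    int64_t e;
    memcpy(&e, fenc, 8);                   // 8 encoder pixels
    __m128i enc = _mm_set1_epi64x(e);      // broadcast to both qwords

    // ref0 in the low qword, ref1 in the high qword (movq + movhps in the asm)
    __m128i r01 = _mm_unpacklo_epi64(_mm_loadl_epi64((const __m128i*)ref0),
                                     _mm_loadl_epi64((const __m128i*)ref1));
    __m128i r23 = _mm_unpacklo_epi64(_mm_loadl_epi64((const __m128i*)ref2),
                                     _mm_loadl_epi64((const __m128i*)ref3));

    // one psadbw gives two 8-pixel SADs, one per 64-bit lane
    *acc01 = _mm_add_epi32(*acc01, _mm_sad_epu8(r01, enc));
    *acc23 = _mm_add_epi32(*acc23, _mm_sad_epu8(r23, enc));
}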

