[x265] [PATCH] asm: avx2 code for sad_x3[8x32] for 8bpp - 70x
sumalatha at multicorewareinc.com
sumalatha at multicorewareinc.com
Mon Mar 9 10:51:13 CET 2015
# HG changeset patch
# User Sumalatha Polureddy<sumalatha at multicorewareinc.com>
# Date 1425894660 -19800
# Node ID 12ee2fe1b2f5e19b3f24c4108bc7e7eb6775d54e
# Parent 7f8b41ba71cb518597202c5da7c16e2ad3e4d6f9
asm: avx2 code for sad_x3[8x32] for 8bpp - 70x
sad_x3[ 8x32] 70.50x 1284.68 90571.77
diff -r 7f8b41ba71cb -r 12ee2fe1b2f5 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp Mon Mar 09 15:10:28 2015 +0530
+++ b/source/common/x86/asm-primitives.cpp Mon Mar 09 15:21:00 2015 +0530
@@ -1444,6 +1444,7 @@
p.pu[LUMA_8x8].sad_x3 = x265_pixel_sad_x3_8x8_avx2;
p.pu[LUMA_8x16].sad_x3 = x265_pixel_sad_x3_8x16_avx2;
+ p.pu[LUMA_8x32].sad_x3 = x265_pixel_sad_x3_8x32_avx2;
p.pu[LUMA_16x8].sad_x4 = x265_pixel_sad_x4_16x8_avx2;
p.pu[LUMA_16x12].sad_x4 = x265_pixel_sad_x4_16x12_avx2;
diff -r 7f8b41ba71cb -r 12ee2fe1b2f5 source/common/x86/sad-a.asm
--- a/source/common/x86/sad-a.asm Mon Mar 09 15:10:28 2015 +0530
+++ b/source/common/x86/sad-a.asm Mon Mar 09 15:21:00 2015 +0530
@@ -3877,4 +3877,72 @@
RET
+INIT_YMM avx2
+cglobal pixel_sad_x3_8x32, 6, 7, 7, pix1, pix2, pix3, pix4, frefstride, res ; res[0..2] = SAD(pix1 vs pix2/pix3/pix4); pix1 stride = FENC_STRIDE, ref stride = r4 (frefstride)
+ pxor xm0, xm0 ; m0 = SAD accumulator for pix2 -> res[0]
+ mova xm1, xm0 ; m1 = SAD accumulator for pix3 -> res[1]
+ mova xm2, xm0 ; m2 = SAD accumulator for pix4 -> res[2]
+ mov r6d, 8 ; 8 iterations x 4 rows each = 32 rows
+
+.loop ; NOTE(review): conventionally written ".loop:" — NASM emits an orphan-label warning without the colon
+ movq xm3, [r0] ; fenc row 0 (8 bytes) -> low qword
+ movhps xm3, [r0 + FENC_STRIDE] ; fenc row 1 -> high qword
+ movq xm4, [r0 + 2 * FENC_STRIDE] ; fenc row 2
+ movhps xm4, [r0 + FENC_STRIDE + 2 * FENC_STRIDE] ; fenc row 3 (offset = 3*FENC_STRIDE)
+ vinserti128 m3, m3, xm4, 1 ; pix1: rows 0-3 packed into one ymm (8 bytes/row)
+
+ movq xm4, [r1] ; pix2 rows 0-1 into low lane
+ movhps xm4, [r1 + r4]
+ lea r1, [r1 + 2 * r4] ; advance pix2 two rows (readvanced again after the block below)
+ movq xm5, [r1] ; pix2 rows 2-3 into high lane
+ movhps xm5, [r1 + r4]
+ vinserti128 m4, m4, xm5, 1 ; pix2
+ psadbw m6, m3, m4 ; four row SADs, one 16-bit sum per qword lane
+ paddq m0, m6 ; res[0]
+
+ movq xm4, [r2] ; pix3 rows 0-1
+ movhps xm4, [r2 + r4]
+ lea r2, [r2 + 2 * r4]
+ movq xm5, [r2] ; pix3 rows 2-3
+ movhps xm5, [r2 + r4]
+ vinserti128 m4, m4, xm5, 1 ; pix3
+ psadbw m6, m3, m4
+ paddq m1, m6 ; res[1]
+
+ movq xm4, [r3] ; pix4 rows 0-1
+ movhps xm4, [r3 + r4]
+ lea r3, [r3 + 2 * r4]
+ movq xm5, [r3] ; pix4 rows 2-3
+ movhps xm5, [r3 + r4]
+ vinserti128 m4, m4, xm5, 1 ; pix4 (original comment said "pix2" — mislabeled; r3 is the third reference)
+ psadbw m3, m4 ; destructive form is fine: pix1 no longer needed this iteration
+ paddq m2, m3 ; res[2]
+
+ lea r0, [r0 + 4 * FENC_STRIDE] ; fenc ahead 4 rows
+ lea r1, [r1 + 2* r4] ; refs already moved 2*r4 mid-block, so each advances 4 rows total
+ lea r2, [r2 + 2 * r4]
+ lea r3, [r3 + 2 * r4]
+
+ dec r6d
+ jnz .loop
+
+ vextracti128 xm4, m0, 1 ; fold high 128-bit lane onto low
+ paddd xm0, xm4 ; partial sums now in dword 0 and dword 2 (psadbw writes qword results)
+ pshufd xm4, xm0, 2 ; bring dword 2 down to dword 0
+ paddd xm0,xm4 ; final horizontal sum in dword 0
+ movd [r5], xm0 ; res[0]
+
+ vextracti128 xm4, m1, 1 ; same reduction for pix3
+ paddd xm1, xm4
+ pshufd xm4, xm1, 2
+ paddd xm1,xm4
+ movd [r5 + 4], xm1 ; res[1]
+
+ vextracti128 xm4, m2, 1 ; same reduction for pix4
+ paddd xm2, xm4
+ pshufd xm4, xm2, 2
+ paddd xm2,xm4
+ movd [r5 + 8], xm2 ; res[2]
+
+RET ; x86inc RET macro — presumably emits vzeroupper for the ymm-clobbering AVX2 path; confirm in x86inc.asm
%endif
More information about the x265-devel
mailing list