[x265] [PATCH 4 of 5] asm: avx2 code for high_bit_depth satd_64xN, improved by ~50% over previous asm
dnyaneshwar at multicorewareinc.com
Fri May 8 07:16:45 CEST 2015
# HG changeset patch
# User Dnyaneshwar G <dnyaneshwar at multicorewareinc.com>
# Date 1430990374 -19800
# Thu May 07 14:49:34 2015 +0530
# Node ID 443ee8d99edf8c3a11f0c1a62f1dd210ee797048
# Parent 9fc3850626b8d2f2b7e28c2ddf70dd043c0fc165
asm: avx2 code for high_bit_depth satd_64xN, improved by ~50% over previous asm
diff -r 9fc3850626b8 -r 443ee8d99edf source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp Thu May 07 14:38:56 2015 +0530
+++ b/source/common/x86/asm-primitives.cpp Thu May 07 14:49:34 2015 +0530
@@ -1181,6 +1181,11 @@
}
if (cpuMask & X265_CPU_AVX2)
{
+ p.pu[LUMA_64x16].satd = x265_pixel_satd_64x16_avx2;
+ p.pu[LUMA_64x32].satd = x265_pixel_satd_64x32_avx2;
+ p.pu[LUMA_64x48].satd = x265_pixel_satd_64x48_avx2;
+ p.pu[LUMA_64x64].satd = x265_pixel_satd_64x64_avx2;
+
p.pu[LUMA_32x8].satd = x265_pixel_satd_32x8_avx2;
p.pu[LUMA_32x16].satd = x265_pixel_satd_32x16_avx2;
p.pu[LUMA_32x24].satd = x265_pixel_satd_32x24_avx2;
diff -r 9fc3850626b8 -r 443ee8d99edf source/common/x86/pixel-a.asm
--- a/source/common/x86/pixel-a.asm Thu May 07 14:38:56 2015 +0530
+++ b/source/common/x86/pixel-a.asm Thu May 07 14:49:34 2015 +0530
@@ -11614,4 +11614,212 @@
paddd xm6, xm7
movd eax, xm6
RET
+
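+; 64xN: walk four 16-sample-wide column strips (the +32/+64/+96 offsets are in
+; bytes; high-bit-depth samples are 2 bytes, strides doubled on entry),
+; accumulate calc_satd_16x8 partial sums in m6, then reduce m6 into eax.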
+cglobal pixel_satd_64x16, 4,8,8
+ add r1d, r1d
+ add r3d, r3d
+ lea r4, [3 * r1]
+ lea r5, [3 * r3]
+ pxor m6, m6
+ mov r6, r0
+ mov r7, r2
+
+ call calc_satd_16x8
+ call calc_satd_16x8
+
+ lea r0, [r6 + 32]
+ lea r2, [r7 + 32]
+
+ call calc_satd_16x8
+ call calc_satd_16x8
+
+ lea r0, [r6 + 64]
+ lea r2, [r7 + 64]
+
+ call calc_satd_16x8
+ call calc_satd_16x8
+
+ lea r0, [r6 + 96]
+ lea r2, [r7 + 96]
+
+ call calc_satd_16x8
+ call calc_satd_16x8
+
+ vextracti128 xm7, m6, 1
+ paddd xm6, xm7
+ pxor xm7, xm7
+ movhlps xm7, xm6
+ paddd xm6, xm7
+ pshufd xm7, xm6, 1
+ paddd xm6, xm7
+ movd eax, xm6
+ RET
+
+cglobal pixel_satd_64x32, 4,8,8
+ add r1d, r1d
+ add r3d, r3d
+ lea r4, [3 * r1]
+ lea r5, [3 * r3]
+ pxor m6, m6
+ mov r6, r0
+ mov r7, r2
+
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+
+ lea r0, [r6 + 32]
+ lea r2, [r7 + 32]
+
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+
+ lea r0, [r6 + 64]
+ lea r2, [r7 + 64]
+
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+
+ lea r0, [r6 + 96]
+ lea r2, [r7 + 96]
+
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+
+ vextracti128 xm7, m6, 1
+ paddd xm6, xm7
+ pxor xm7, xm7
+ movhlps xm7, xm6
+ paddd xm6, xm7
+ pshufd xm7, xm6, 1
+ paddd xm6, xm7
+ movd eax, xm6
+ RET
+
+cglobal pixel_satd_64x48, 4,8,8
+ add r1d, r1d
+ add r3d, r3d
+ lea r4, [3 * r1]
+ lea r5, [3 * r3]
+ pxor m6, m6
+ mov r6, r0
+ mov r7, r2
+
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+
+ lea r0, [r6 + 32]
+ lea r2, [r7 + 32]
+
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+
+ lea r0, [r6 + 64]
+ lea r2, [r7 + 64]
+
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+
+ lea r0, [r6 + 96]
+ lea r2, [r7 + 96]
+
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+
+ vextracti128 xm7, m6, 1
+ paddd xm6, xm7
+ pxor xm7, xm7
+ movhlps xm7, xm6
+ paddd xm6, xm7
+ pshufd xm7, xm6, 1
+ paddd xm6, xm7
+ movd eax, xm6
+ RET
+
+cglobal pixel_satd_64x64, 4,8,8
+ add r1d, r1d
+ add r3d, r3d
+ lea r4, [3 * r1]
+ lea r5, [3 * r3]
+ pxor m6, m6
+ mov r6, r0
+ mov r7, r2
+
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+
+ lea r0, [r6 + 32]
+ lea r2, [r7 + 32]
+
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+
+ lea r0, [r6 + 64]
+ lea r2, [r7 + 64]
+
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+
+ lea r0, [r6 + 96]
+ lea r2, [r7 + 96]
+
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+
+ vextracti128 xm7, m6, 1
+ paddd xm6, xm7
+ pxor xm7, xm7
+ movhlps xm7, xm6
+ paddd xm6, xm7
+ pshufd xm7, xm6, 1
+ paddd xm6, xm7
+ movd eax, xm6
+ RET
%endif ; ARCH_X86_64 == 1 && HIGH_BIT_DEPTH == 1
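For readers not fluent in NASM: each of the four new entry points walks the 64-sample-wide block as four 16-sample column strips (the +32/+64/+96 byte offsets, with 2-byte high-bit-depth samples and the strides doubled by the `add r1d, r1d` / `add r3d, r3d` pairs), accumulating one `calc_satd_16x8` call per 16x8 tile. A minimal C++ sketch of that blocking, where `satd_64xN` and `satd_16x8` are illustrative names rather than x265 API, and `satd_16x8` stands in for the asm's `calc_satd_16x8` helper defined earlier in pixel-a.asm:

    #include <cstdint>

    // Assumed to exist elsewhere; any correct 16x8 SATD works here.
    uint32_t satd_16x8(const uint16_t* pix1, intptr_t stride1,
                       const uint16_t* pix2, intptr_t stride2);

    uint32_t satd_64xN(const uint16_t* pix1, intptr_t stride1,
                       const uint16_t* pix2, intptr_t stride2, int rows)
    {
        uint32_t sum = 0;
        for (int x = 0; x < 64; x += 16)        // four 16-sample column strips
            for (int y = 0; y < rows; y += 8)   // rows/8 16x8 tiles per strip
                sum += satd_16x8(pix1 + y * stride1 + x, stride1,
                                 pix2 + y * stride2 + x, stride2);
        return sum;
    }

Splitting at 16 samples lets the 64xN kernels reuse the same `calc_satd_16x8` helper presumably shared with the 32xN kernels registered in the same asm-primitives.cpp hunk, so only the strip/row bookkeeping differs per block size.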
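The shared epilogue (`vextracti128` through `movd`) is a standard horizontal reduction of the eight 32-bit partial sums held in `m6`. Expressed with AVX2 intrinsics as a sketch: `hsum_epi32_avx2` is a hypothetical helper name, and the two `_mm_srli_si128` folds replace the asm's `movhlps`/`pshufd` steps; both variants leave the full sum in lane 0, which is all `movd` reads.

    #include <immintrin.h>
    #include <cstdint>

    // Hypothetical helper mirroring the asm epilogue above: reduce the eight
    // 32-bit partial SATD sums in a ymm accumulator to a single scalar.
    static inline uint32_t hsum_epi32_avx2(__m256i acc)
    {
        __m128i lo  = _mm256_castsi256_si128(acc);        // low 128 bits of m6
        __m128i hi  = _mm256_extracti128_si256(acc, 1);   // vextracti128 xm7, m6, 1
        __m128i sum = _mm_add_epi32(lo, hi);              // paddd xm6, xm7
        sum = _mm_add_epi32(sum, _mm_srli_si128(sum, 8)); // fold upper qword
        sum = _mm_add_epi32(sum, _mm_srli_si128(sum, 4)); // fold lane 1
        return (uint32_t)_mm_cvtsi128_si32(sum);          // movd eax, xm6
    }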