[x265] [PATCH] asm: psyCost_pp_64x64 in sse4: improve 417824c->56347c
Divya Manivannan
divya at multicorewareinc.com
Mon Jan 5 06:20:56 CET 2015
# HG changeset patch
# User Divya Manivannan <divya at multicorewareinc.com>
# Date 1420435173 -19800
# Mon Jan 05 10:49:33 2015 +0530
# Node ID c6e3822bc1ce38636bff5ea69af66733d755e43b
# Parent e93ef9322ba1214243187411bc0232e9579f72d4
asm: psyCost_pp_64x64 in sse4: improve 417824c->56347c
diff -r e93ef9322ba1 -r c6e3822bc1ce source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp Mon Jan 05 10:34:14 2015 +0530
+++ b/source/common/x86/asm-primitives.cpp Mon Jan 05 10:49:33 2015 +0530
@@ -1440,6 +1440,7 @@
p.psy_cost_pp[BLOCK_8x8] = x265_psyCost_pp_8x8_sse4;
p.psy_cost_pp[BLOCK_16x16] = x265_psyCost_pp_16x16_sse4;
p.psy_cost_pp[BLOCK_32x32] = x265_psyCost_pp_32x32_sse4;
+ p.psy_cost_pp[BLOCK_64x64] = x265_psyCost_pp_64x64_sse4;
#endif
}
if (cpuMask & X265_CPU_XOP)
@@ -1728,6 +1729,7 @@
p.psy_cost_pp[BLOCK_8x8] = x265_psyCost_pp_8x8_sse4;
p.psy_cost_pp[BLOCK_16x16] = x265_psyCost_pp_16x16_sse4;
p.psy_cost_pp[BLOCK_32x32] = x265_psyCost_pp_32x32_sse4;
+ p.psy_cost_pp[BLOCK_64x64] = x265_psyCost_pp_64x64_sse4;
#endif
}
if (cpuMask & X265_CPU_AVX)
diff -r e93ef9322ba1 -r c6e3822bc1ce source/common/x86/pixel-a.asm
--- a/source/common/x86/pixel-a.asm Mon Jan 05 10:34:14 2015 +0530
+++ b/source/common/x86/pixel-a.asm Mon Jan 05 10:49:33 2015 +0530
@@ -7344,3 +7344,216 @@
RET
%endif ; HIGH_BIT_DEPTH
%endif
+
+%if ARCH_X86_64
+%if HIGH_BIT_DEPTH
+INIT_XMM sse4
+;-----------------------------------------------------------------------------
+; int psyCost_pp_64x64(const pixel* source, intptr_t sstride,
+;                      const pixel* recon,  intptr_t rstride)
+; HIGH_BIT_DEPTH variant: pixels are 16-bit, so FIX_STRIDES scales the strides
+; and each movu below loads one 8-pixel row (16 bytes).
+; In:  r0 = source, r1 = sstride, r2 = recon, r3 = rstride
+;      (x86inc cglobal: 4 args, 9 GPRs, 14 XMM regs)
+; Out: eax = accumulated psy cost over the 64x64 area
+; The 64x64 area is walked as an 8x8 grid of 8x8 blocks (.loopH x .loopW).
+; For each 8x8 block of each input an "energy" value is formed as
+; (Hadamard/SA8D-style sum) - (pixel sum >> 2); the absolute difference of the
+; two per-block energies is accumulated in m11 and returned via movd.
+;-----------------------------------------------------------------------------
+cglobal psyCost_pp_64x64, 4, 9, 14
+
+ FIX_STRIDES r1, r3
+ lea r4, [3 * r1] ; r4 = 3 * sstride (bytes)
+ lea r8, [3 * r3] ; r8 = 3 * rstride (bytes)
+ mova m12, [pw_1] ; word 1s: pmaddwd operand for the row-sum reduction
+ mova m13, [pd_1] ; dword 1s: rounding bias before the psrld-by-1 below
+ pxor m11, m11 ; m11 = total cost accumulator
+ mov r7d, 8 ; 8 rows of 8x8 blocks
+.loopH:
+ mov r6d, 8 ; 8 columns of 8x8 blocks
+.loopW:
+ pxor m10, m10 ; cleared defensively; fully rewritten by psubd m10, m0, m8
+ ; load the 8x8 source block: rows 0-3 from r0, rows 4-7 via r5 = r0 + 4*stride
+ movu m0, [r0]
+ movu m1, [r0 + r1]
+ movu m2, [r0 + r1 * 2]
+ movu m3, [r0 + r4]
+ lea r5, [r0 + r1 * 4]
+ movu m4, [r5]
+ movu m5, [r5 + r1]
+ movu m6, [r5 + r1 * 2]
+ movu m7, [r5 + r4]
+
+ ; m8 = (sum of all 64 source pixels) >> 2
+ paddw m8, m0, m1
+ paddw m8, m2
+ paddw m8, m3
+ paddw m8, m4
+ paddw m8, m5
+ paddw m8, m6
+ paddw m8, m7
+ pmaddwd m8, m12 ; widen column word sums to dwords
+ movhlps m9, m8 ; horizontal dword reduction toward the low lane
+ paddd m8, m9
+ psrldq m9, m8, 4
+ paddd m8, m9
+ psrld m8, 2 ; low dword of m8 = pixel_sum >> 2
+
+ ; 8x8 2-D Hadamard of m0-m7 (m9 = scratch, 'amax' = abs-max merge of rows)
+ HADAMARD8_2D 0, 1, 2, 3, 4, 5, 6, 7, 9, amax
+
+ paddd m0, m1
+ paddd m0, m2
+ paddd m0, m3
+ HADDUW m0, m1 ; horizontally reduce to a single dword sum
+ paddd m0, m13 ; +1 ...
+ psrld m0, 1 ; ... then >> 1 (rounded halving of the SA8D sum)
+ psubd m10, m0, m8 ; m10 = source energy = sa8d - (sum >> 2)
+
+ ; identical computation for the co-located 8x8 recon block
+ movu m0, [r2]
+ movu m1, [r2 + r3]
+ movu m2, [r2 + r3 * 2]
+ movu m3, [r2 + r8]
+ lea r5, [r2 + r3 * 4]
+ movu m4, [r5]
+ movu m5, [r5 + r3]
+ movu m6, [r5 + r3 * 2]
+ movu m7, [r5 + r8]
+
+ paddw m8, m0, m1
+ paddw m8, m2
+ paddw m8, m3
+ paddw m8, m4
+ paddw m8, m5
+ paddw m8, m6
+ paddw m8, m7
+ pmaddwd m8, m12
+ movhlps m9, m8
+ paddd m8, m9
+ psrldq m9, m8, 4
+ paddd m8, m9
+ psrld m8, 2 ; low dword of m8 = recon pixel_sum >> 2
+
+ HADAMARD8_2D 0, 1, 2, 3, 4, 5, 6, 7, 9, amax
+
+ paddd m0, m1
+ paddd m0, m2
+ paddd m0, m3
+ HADDUW m0, m1
+ paddd m0, m13
+ psrld m0, 1
+ psubd m0, m8 ; m0 = recon energy = sa8d - (sum >> 2)
+ psubd m10, m0 ; m10 = source energy - recon energy
+ pabsd m0, m10 ; |energy difference| for this 8x8 block
+ paddd m11, m0 ; accumulate (only the low dword is read by movd below)
+ add r0, 16 ; advance 8 pixels = 16 bytes at HIGH_BIT_DEPTH
+ add r2, 16
+ dec r6d
+ jnz .loopW
+ lea r0, [r0 + r1 * 8 - 128] ; next block row: down 8 rows, back 64 pixels
+ lea r2, [r2 + r3 * 8 - 128]
+ dec r7d
+ jnz .loopH
+ movd eax, m11 ; return accumulated cost
+ RET
+
+%else ; !HIGH_BIT_DEPTH
+INIT_XMM sse4
+;-----------------------------------------------------------------------------
+; int psyCost_pp_64x64(const pixel* source, intptr_t sstride,
+;                      const pixel* recon,  intptr_t rstride)
+; 8-bit variant: each movddup loads one 8-pixel row duplicated into both
+; 64-bit halves, then pmaddubsw against hmul_8p produces the signed word
+; products that HADAMARD8_2D_HMUL expects.
+; In:  r0 = source, r1 = sstride, r2 = recon, r3 = rstride
+;      (x86inc cglobal: 4 args, 9 GPRs, 15 XMM regs)
+; Out: eax = accumulated psy cost over the 64x64 area
+; Same structure as the HIGH_BIT_DEPTH version: 8x8 grid of 8x8 blocks,
+; per-block energy = (Hadamard sum) - (pixel sum >> 2), abs-diff of the two
+; inputs' energies accumulated in m13.
+;-----------------------------------------------------------------------------
+cglobal psyCost_pp_64x64, 4, 9, 15
+
+ lea r4, [3 * r1] ; r4 = 3 * sstride
+ lea r8, [3 * r3] ; r8 = 3 * rstride
+ mova m8, [hmul_8p] ; +/-1 byte weights for pmaddubsw (row pre-multiply)
+ mova m10, [pw_1] ; word 1s for the pmaddwd reduction
+ mova m14, [pd_1] ; dword 1s: rounding bias before psrld 1
+ pxor m13, m13 ; m13 = total cost accumulator
+ mov r7d, 8 ; 8 rows of 8x8 blocks
+.loopH:
+ mov r6d, 8 ; 8 columns of 8x8 blocks
+.loopW:
+ pxor m12, m12 ; cleared defensively; fully rewritten by psubd m12, m0, m11
+ ; load the 8x8 source block, each 8-byte row duplicated into both halves
+ movddup m0, [r0]
+ movddup m1, [r0 + r1]
+ movddup m2, [r0 + r1 * 2]
+ movddup m3, [r0 + r4]
+ lea r5, [r0 + r1 * 4]
+ movddup m4, [r5]
+ movddup m5, [r5 + r1]
+ movddup m6, [r5 + r1 * 2]
+ movddup m7, [r5 + r4]
+
+ ; pre-multiply every row by the hmul_8p pattern (feeds the _HMUL transform)
+ pmaddubsw m0, m8
+ pmaddubsw m1, m8
+ pmaddubsw m2, m8
+ pmaddubsw m3, m8
+ pmaddubsw m4, m8
+ pmaddubsw m5, m8
+ pmaddubsw m6, m8
+ pmaddubsw m7, m8
+
+ ; m11 = (sum of all 64 source pixels) >> 2, built from the pmaddubsw sums
+ ; (NOTE(review): relies on hmul_8p's layout putting pairwise pixel sums in
+ ; the lanes this reduction reads — confirm against the constant's definition)
+ paddw m11, m0, m1
+ paddw m11, m2
+ paddw m11, m3
+ paddw m11, m4
+ paddw m11, m5
+ paddw m11, m6
+ paddw m11, m7
+
+ pmaddwd m11, m10 ; widen word sums to dwords
+ psrldq m9, m11, 4 ; fold the adjacent dword into the low lane
+ paddd m11, m9
+ psrld m11, 2 ; low dword of m11 = pixel_sum >> 2
+
+ ; 8x8 2-D Hadamard on the pre-multiplied rows (m9 = scratch)
+ HADAMARD8_2D_HMUL 0, 1, 2, 3, 4, 5, 6, 7, 9, 9
+
+ paddw m0, m1
+ paddw m0, m2
+ paddw m0, m3
+ HADDW m0, m1 ; horizontally reduce words to a single dword sum
+
+ paddd m0, m14 ; +1 ...
+ psrld m0, 1 ; ... then >> 1 (rounded halving of the SA8D sum)
+ psubd m12, m0, m11 ; m12 = source energy = sa8d - (sum >> 2)
+
+ ; identical computation for the co-located 8x8 recon block
+ movddup m0, [r2]
+ movddup m1, [r2 + r3]
+ movddup m2, [r2 + r3 * 2]
+ movddup m3, [r2 + r8]
+ lea r5, [r2 + r3 * 4]
+ movddup m4, [r5]
+ movddup m5, [r5 + r3]
+ movddup m6, [r5 + r3 * 2]
+ movddup m7, [r5 + r8]
+
+ pmaddubsw m0, m8
+ pmaddubsw m1, m8
+ pmaddubsw m2, m8
+ pmaddubsw m3, m8
+ pmaddubsw m4, m8
+ pmaddubsw m5, m8
+ pmaddubsw m6, m8
+ pmaddubsw m7, m8
+
+ paddw m11, m0, m1
+ paddw m11, m2
+ paddw m11, m3
+ paddw m11, m4
+ paddw m11, m5
+ paddw m11, m6
+ paddw m11, m7
+
+ pmaddwd m11, m10
+ psrldq m9, m11, 4
+ paddd m11, m9
+ psrld m11, 2 ; low dword of m11 = recon pixel_sum >> 2
+
+ HADAMARD8_2D_HMUL 0, 1, 2, 3, 4, 5, 6, 7, 9, 9
+
+ paddw m0, m1
+ paddw m0, m2
+ paddw m0, m3
+ HADDW m0, m1
+
+ paddd m0, m14
+ psrld m0, 1
+ psubd m0, m11 ; m0 = recon energy = sa8d - (sum >> 2)
+ psubd m12, m0 ; m12 = source energy - recon energy
+ pabsd m0, m12 ; |energy difference| for this 8x8 block
+ paddd m13, m0 ; accumulate (only the low dword is read by movd below)
+ add r0, 8 ; advance 8 one-byte pixels
+ add r2, 8
+ dec r6d
+ jnz .loopW
+ lea r0, [r0 + r1 * 8 - 64] ; next block row: down 8 rows, back 64 pixels
+ lea r2, [r2 + r3 * 8 - 64]
+ dec r7d
+ jnz .loopH
+ movd eax, m13 ; return accumulated cost
+ RET
+%endif ; HIGH_BIT_DEPTH
+%endif
diff -r e93ef9322ba1 -r c6e3822bc1ce source/common/x86/pixel.h
--- a/source/common/x86/pixel.h Mon Jan 05 10:34:14 2015 +0530
+++ b/source/common/x86/pixel.h Mon Jan 05 10:49:33 2015 +0530
@@ -222,6 +222,7 @@
int x265_psyCost_pp_8x8_sse4(const pixel* source, intptr_t sstride, const pixel* recon, intptr_t rstride);
int x265_psyCost_pp_16x16_sse4(const pixel* source, intptr_t sstride, const pixel* recon, intptr_t rstride);
int x265_psyCost_pp_32x32_sse4(const pixel* source, intptr_t sstride, const pixel* recon, intptr_t rstride);
+int x265_psyCost_pp_64x64_sse4(const pixel* source, intptr_t sstride, const pixel* recon, intptr_t rstride);
#undef DECL_PIXELS
#undef DECL_HEVC_SSD
More information about the x265-devel
mailing list