[x265] [PATCH 2 of 5] asm: avx2 code for high_bit_depth satd_16xN, improved by ~50% over the previous asm
dnyaneshwar at multicorewareinc.com
dnyaneshwar at multicorewareinc.com
Fri May 8 07:16:43 CEST 2015
# HG changeset patch
# User Dnyaneshwar G <dnyaneshwar at multicorewareinc.com>
# Date 1430988601 -19800
# Thu May 07 14:20:01 2015 +0530
# Node ID 0ea3afb105ecb96db2fe5a2f8a6e19d718a794b4
# Parent 948636c0bbabd45320b451834471d0976cce947b
asm: avx2 code for high_bit_depth satd_16xN, improved by ~50% over the previous asm
diff -r 948636c0bbab -r 0ea3afb105ec source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp Thu May 07 13:41:38 2015 +0530
+++ b/source/common/x86/asm-primitives.cpp Thu May 07 14:20:01 2015 +0530
@@ -1181,7 +1181,12 @@
}
if (cpuMask & X265_CPU_AVX2)
{
+ p.pu[LUMA_16x4].satd = x265_pixel_satd_16x4_avx2;
p.pu[LUMA_16x8].satd = x265_pixel_satd_16x8_avx2;
+ p.pu[LUMA_16x12].satd = x265_pixel_satd_16x12_avx2;
+ p.pu[LUMA_16x16].satd = x265_pixel_satd_16x16_avx2;
+ p.pu[LUMA_16x32].satd = x265_pixel_satd_16x32_avx2;
+ p.pu[LUMA_16x64].satd = x265_pixel_satd_16x64_avx2;
p.cu[BLOCK_32x32].ssd_s = x265_pixel_ssd_s_32_avx2;
p.cu[BLOCK_16x16].sse_ss = x265_pixel_ssd_ss_16x16_avx2;
diff -r 948636c0bbab -r 0ea3afb105ec source/common/x86/pixel-a.asm
--- a/source/common/x86/pixel-a.asm Thu May 07 13:41:38 2015 +0530
+++ b/source/common/x86/pixel-a.asm Thu May 07 14:20:01 2015 +0530
@@ -11271,6 +11271,87 @@
paddd m6, m1
ret
+cglobal calc_satd_16x4 ; SATD of one 16x4 block of 16-bit pixels; in: r0/r2 = pix1/pix2, r1/r3 = byte strides, r4/r5 = 3*stride; accumulates dword sums into m6, advances r0/r2 by 4 rows; clobbers m0-m5, m7
+ ; rows 0-3
+ movu m0, [r0] ; row 0 of pix1
+ movu m4, [r2] ; row 0 of pix2
+ psubw m0, m4 ; m0 = row 0 difference
+ movu m1, [r0 + r1]
+ movu m5, [r2 + r3]
+ psubw m1, m5 ; m1 = row 1 difference
+ movu m2, [r0 + r1 * 2]
+ movu m4, [r2 + r3 * 2]
+ psubw m2, m4 ; m2 = row 2 difference
+ movu m3, [r0 + r4] ; r4 = 3 * stride1 (row 3)
+ movu m5, [r2 + r5] ; r5 = 3 * stride2 (row 3)
+ psubw m3, m5 ; m3 = row 3 difference
+ lea r0, [r0 + r1 * 4] ; advance both sources by 4 rows for the caller
+ lea r2, [r2 + r3 * 4]
+ paddw m4, m0, m1 ; vertical Hadamard butterflies (stage 1)
+ psubw m1, m0
+ paddw m0, m2, m3
+ psubw m3, m2
+ punpckhwd m2, m4, m1 ; interleave words to set up the horizontal transform
+ punpcklwd m4, m1
+ punpckhwd m1, m0, m3
+ punpcklwd m0, m3
+ paddw m3, m4, m0 ; butterflies (stage 2)
+ psubw m0, m4
+ paddw m4, m2, m1
+ psubw m1, m2
+ punpckhdq m2, m3, m0 ; interleave dwords
+ punpckldq m3, m0
+ paddw m0, m3, m2 ; butterflies (stage 3)
+ psubw m2, m3
+ punpckhdq m3, m4, m1
+ punpckldq m4, m1
+ paddw m1, m4, m3
+ psubw m3, m4
+ punpckhqdq m4, m0, m1 ; interleave qwords for the final stage
+ punpcklqdq m0, m1
+ pabsw m0, m0 ; |a+b| and |a-b| ...
+ pabsw m4, m4
+ pmaxsw m0, m0, m4 ; ... max(|a+b|,|a-b|) = final butterfly; NOTE(review): 3-operand form (x86inc emits vpmaxsw) — 2-operand form is used below; prefer one style
+ punpckhqdq m1, m2, m3
+ punpcklqdq m2, m3
+ pabsw m2, m2
+ pabsw m1, m1
+ pmaxsw m2, m1
+ pxor m7, m7 ; m7 = 0, used to zero-extend words to dwords
+ mova m1, m0
+ punpcklwd m1, m7 ; low words -> dwords
+ paddd m6, m1 ; accumulate into the dword sums in m6
+ mova m1, m0
+ punpckhwd m1, m7 ; high words -> dwords
+ paddd m6, m1
+ pxor m7, m7 ; NOTE(review): redundant — m7 is still zero here
+ mova m1, m2
+ punpcklwd m1, m7
+ paddd m6, m1
+ mova m1, m2
+ punpckhwd m1, m7
+ paddd m6, m1
+ ret
+
+cglobal pixel_satd_16x4, 4,6,8 ; int satd_16x4(pix1, stride1, pix2, stride2) — HIGH_BIT_DEPTH
+ add r1d, r1d ; strides: pixel units -> byte units (16-bit pixels)
+ add r3d, r3d
+ lea r4, [3 * r1] ; r4/r5 = 3*stride, for row-3 addressing in the helper
+ lea r5, [3 * r3]
+ pxor m6, m6 ; m6 = dword SATD accumulator
+
+ call calc_satd_16x4 ; one 16x4 block
+
+ vextracti128 xm7, m6, 1 ; horizontal reduction: fold high 128 bits
+ paddd xm6, xm7
+ pxor xm7, xm7
+ movhlps xm7, xm6 ; fold high qword
+ paddd xm6, xm7
+ pshufd xm7, xm6, 1 ; fold remaining dword
+ paddd xm6, xm7
+ movd eax, xm6 ; return SATD in eax
+ RET
+
cglobal pixel_satd_16x8, 4,6,8
add r1d, r1d
add r3d, r3d
@@ -11289,4 +11370,92 @@
paddd xm6, xm7
movd eax, xm6
RET
+
+cglobal pixel_satd_16x12, 4,6,8 ; int satd_16x12(pix1, stride1, pix2, stride2) — HIGH_BIT_DEPTH
+ add r1d, r1d ; pixel strides -> byte strides
+ add r3d, r3d
+ lea r4, [3 * r1] ; r4/r5 = 3*stride
+ lea r5, [3 * r3]
+ pxor m6, m6 ; dword SATD accumulator
+
+ call calc_satd_16x8 ; 8 rows
+ call calc_satd_16x4 ; + 4 rows = 12
+
+ vextracti128 xm7, m6, 1 ; reduce 8 dword partial sums to one
+ paddd xm6, xm7
+ pxor xm7, xm7
+ movhlps xm7, xm6
+ paddd xm6, xm7
+ pshufd xm7, xm6, 1
+ paddd xm6, xm7
+ movd eax, xm6 ; return SATD in eax
+ RET
+
+cglobal pixel_satd_16x16, 4,6,8 ; int satd_16x16(pix1, stride1, pix2, stride2) — HIGH_BIT_DEPTH
+ add r1d, r1d ; pixel strides -> byte strides
+ add r3d, r3d
+ lea r4, [3 * r1] ; r4/r5 = 3*stride
+ lea r5, [3 * r3]
+ pxor m6, m6 ; dword SATD accumulator
+
+ call calc_satd_16x8 ; 2 x 8 rows = 16
+ call calc_satd_16x8
+
+ vextracti128 xm7, m6, 1 ; reduce 8 dword partial sums to one
+ paddd xm6, xm7
+ pxor xm7, xm7
+ movhlps xm7, xm6
+ paddd xm6, xm7
+ pshufd xm7, xm6, 1
+ paddd xm6, xm7
+ movd eax, xm6 ; return SATD in eax
+ RET
+
+cglobal pixel_satd_16x32, 4,6,8 ; int satd_16x32(pix1, stride1, pix2, stride2) — HIGH_BIT_DEPTH
+ add r1d, r1d ; pixel strides -> byte strides
+ add r3d, r3d
+ lea r4, [3 * r1] ; r4/r5 = 3*stride
+ lea r5, [3 * r3]
+ pxor m6, m6 ; dword SATD accumulator
+
+ call calc_satd_16x8 ; 4 x 8 rows = 32
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+
+ vextracti128 xm7, m6, 1 ; reduce 8 dword partial sums to one
+ paddd xm6, xm7
+ pxor xm7, xm7
+ movhlps xm7, xm6
+ paddd xm6, xm7
+ pshufd xm7, xm6, 1
+ paddd xm6, xm7
+ movd eax, xm6 ; return SATD in eax
+ RET
+
+cglobal pixel_satd_16x64, 4,6,8 ; int satd_16x64(pix1, stride1, pix2, stride2) — HIGH_BIT_DEPTH
+ add r1d, r1d ; pixel strides -> byte strides
+ add r3d, r3d
+ lea r4, [3 * r1] ; r4/r5 = 3*stride
+ lea r5, [3 * r3]
+ pxor m6, m6 ; dword SATD accumulator
+
+ call calc_satd_16x8 ; 8 x 8 rows = 64
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+
+ vextracti128 xm7, m6, 1 ; reduce 8 dword partial sums to one
+ paddd xm6, xm7
+ pxor xm7, xm7
+ movhlps xm7, xm6
+ paddd xm6, xm7
+ pshufd xm7, xm6, 1
+ paddd xm6, xm7
+ movd eax, xm6 ; return SATD in eax
+ RET
%endif ; ARCH_X86_64 == 1 && HIGH_BIT_DEPTH == 1
More information about the x265-devel
mailing list