[x265] [PATCH] asm: avx2 code for satd_48x64 and 64xN, improved by ~50% over SSE
dnyaneshwar at multicorewareinc.com
Wed Apr 22 10:19:37 CEST 2015
# HG changeset patch
# User Dnyaneshwar G <dnyaneshwar at multicorewareinc.com>
# Date 1429682259 -19800
# Wed Apr 22 11:27:39 2015 +0530
# Node ID a6010d20ffe96b165af958d5f76b647d7c62dab5
# Parent 75344c17ad4ca1b98f8c767e5cb5c0daf51ae5e8
asm: avx2 code for satd_48x64 and 64xN, improved by ~50% over SSE
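
All the new kernels use the same 16x8 tiling: each width-16 column of the block is
covered by back-to-back calc_satd_16x8 calls, and the base pointers saved in r6/r7
are stepped by 16/32/48 bytes per column. A minimal C++ sketch of that decomposition
(satd_tiled and satd16x8_fn are illustrative names only, not x265 primitives):

#include <cstdint>

// Stand-in for the role of calc_satd_16x8 in the asm; the name and
// prototype are assumptions made for illustration.
typedef int (*satd16x8_fn)(const uint8_t* pix1, intptr_t stride1,
                           const uint8_t* pix2, intptr_t stride2);

// Sum a width x height block out of 16x8 tiles, the same decomposition
// the avx2 kernels perform below.
static int satd_tiled(satd16x8_fn satd_16x8,
                      const uint8_t* pix1, intptr_t stride1,
                      const uint8_t* pix2, intptr_t stride2,
                      int width, int height)      // e.g. 48x64, 64x16 .. 64x64
{
    int sum = 0;
    for (int x = 0; x < width; x += 16)           // lea r0, [r6 + 16/32/48]
        for (int y = 0; y < height; y += 8)       // repeated calc_satd_16x8
            sum += satd_16x8(pix1 + y * stride1 + x, stride1,
                             pix2 + y * stride2 + x, stride2);
    return sum;
}
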
diff -r 75344c17ad4c -r a6010d20ffe9 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp Wed Apr 22 11:19:18 2015 +0530
+++ b/source/common/x86/asm-primitives.cpp Wed Apr 22 11:27:39 2015 +0530
@@ -1856,6 +1856,11 @@
p.pu[LUMA_32x24].satd = x265_pixel_satd_32x24_avx2;
p.pu[LUMA_32x32].satd = x265_pixel_satd_32x32_avx2;
p.pu[LUMA_32x64].satd = x265_pixel_satd_32x64_avx2;
+ p.pu[LUMA_48x64].satd = x265_pixel_satd_48x64_avx2;
+ p.pu[LUMA_64x16].satd = x265_pixel_satd_64x16_avx2;
+ p.pu[LUMA_64x32].satd = x265_pixel_satd_64x32_avx2;
+ p.pu[LUMA_64x48].satd = x265_pixel_satd_64x48_avx2;
+ p.pu[LUMA_64x64].satd = x265_pixel_satd_64x64_avx2;
p.pu[LUMA_32x8].sad = x265_pixel_sad_32x8_avx2;
p.pu[LUMA_32x16].sad = x265_pixel_sad_32x16_avx2;
diff -r 75344c17ad4c -r a6010d20ffe9 source/common/x86/pixel-a.asm
--- a/source/common/x86/pixel-a.asm Wed Apr 22 11:19:18 2015 +0530
+++ b/source/common/x86/pixel-a.asm Wed Apr 22 11:27:39 2015 +0530
@@ -10915,4 +10915,236 @@
movd eax, xm0
RET
+cglobal pixel_satd_48x64, 4,8,10 ; if WIN64 && cpuflag(avx2)
+ mova m7, [hmul_16p]
+ lea r4, [3 * r1]
+ lea r5, [3 * r3]
+ pxor m8, m8
+ pxor m9, m9
+ mov r6, r0
+ mov r7, r2
+
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ lea r0, [r6 + 16]
+ lea r2, [r7 + 16]
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ lea r0, [r6 + 32]
+ lea r2, [r7 + 32]
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+
+ paddd m8, m9
+ vextracti128 xm0, m8, 1
+ paddd xm0, xm8
+ movhlps xm1, xm0
+ paddd xm0, xm1
+ pshuflw xm1, xm0, q0032
+ paddd xm0, xm1
+ movd eax, xm0
+ RET
+
+cglobal pixel_satd_64x16, 4,8,10 ; if WIN64 && cpuflag(avx2)
+ mova m7, [hmul_16p]
+ lea r4, [3 * r1]
+ lea r5, [3 * r3]
+ pxor m8, m8
+ pxor m9, m9
+ mov r6, r0
+ mov r7, r2
+
+ call calc_satd_16x8
+ call calc_satd_16x8
+ lea r0, [r6 + 16]
+ lea r2, [r7 + 16]
+ call calc_satd_16x8
+ call calc_satd_16x8
+ lea r0, [r6 + 32]
+ lea r2, [r7 + 32]
+ call calc_satd_16x8
+ call calc_satd_16x8
+ lea r0, [r6 + 48]
+ lea r2, [r7 + 48]
+ call calc_satd_16x8
+ call calc_satd_16x8
+
+ paddd m8, m9
+ vextracti128 xm0, m8, 1
+ paddd xm0, xm8
+ movhlps xm1, xm0
+ paddd xm0, xm1
+ pshuflw xm1, xm0, q0032
+ paddd xm0, xm1
+ movd eax, xm0
+ RET
+
+cglobal pixel_satd_64x32, 4,8,10 ; if WIN64 && cpuflag(avx2)
+ mova m7, [hmul_16p]
+ lea r4, [3 * r1]
+ lea r5, [3 * r3]
+ pxor m8, m8
+ pxor m9, m9
+ mov r6, r0
+ mov r7, r2
+
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ lea r0, [r6 + 16]
+ lea r2, [r7 + 16]
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ lea r0, [r6 + 32]
+ lea r2, [r7 + 32]
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ lea r0, [r6 + 48]
+ lea r2, [r7 + 48]
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+
+ paddd m8, m9
+ vextracti128 xm0, m8, 1
+ paddd xm0, xm8
+ movhlps xm1, xm0
+ paddd xm0, xm1
+ pshuflw xm1, xm0, q0032
+ paddd xm0, xm1
+ movd eax, xm0
+ RET
+
+cglobal pixel_satd_64x48, 4,8,10 ; if WIN64 && cpuflag(avx2)
+ mova m7, [hmul_16p]
+ lea r4, [3 * r1]
+ lea r5, [3 * r3]
+ pxor m8, m8
+ pxor m9, m9
+ mov r6, r0
+ mov r7, r2
+
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ lea r0, [r6 + 16]
+ lea r2, [r7 + 16]
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ lea r0, [r6 + 32]
+ lea r2, [r7 + 32]
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ lea r0, [r6 + 48]
+ lea r2, [r7 + 48]
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+
+ paddd m8, m9
+ vextracti128 xm0, m8, 1
+ paddd xm0, xm8
+ movhlps xm1, xm0
+ paddd xm0, xm1
+ pshuflw xm1, xm0, q0032
+ paddd xm0, xm1
+ movd eax, xm0
+ RET
+
+cglobal pixel_satd_64x64, 4,8,10 ; if WIN64 && cpuflag(avx2)
+ mova m7, [hmul_16p]
+ lea r4, [3 * r1]
+ lea r5, [3 * r3]
+ pxor m8, m8
+ pxor m9, m9
+ mov r6, r0
+ mov r7, r2
+
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ lea r0, [r6 + 16]
+ lea r2, [r7 + 16]
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ lea r0, [r6 + 32]
+ lea r2, [r7 + 32]
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ lea r0, [r6 + 48]
+ lea r2, [r7 + 48]
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+ call calc_satd_16x8
+
+ paddd m8, m9
+ vextracti128 xm0, m8, 1
+ paddd xm0, xm8
+ movhlps xm1, xm0
+ paddd xm0, xm1
+ pshuflw xm1, xm0, q0032
+ paddd xm0, xm1
+ movd eax, xm0
+ RET
+
%endif ; if ARCH_X86_64 == 1
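
Each kernel ends with the same horizontal reduction of the two ymm accumulators
(paddd m8, m9 followed by vextracti128 / movhlps / pshuflw / movd). A rough
intrinsics equivalent, assuming the accumulator holds packed 32-bit partial sums
(reduce_satd_acc is an illustrative name, not part of the patch):

#include <immintrin.h>
#include <cstdint>

// Collapse a 256-bit register of packed 32-bit partial SATD sums to a
// single scalar, mirroring the tail of each kernel above.
static inline uint32_t reduce_satd_acc(__m256i acc)
{
    __m128i lo = _mm256_castsi256_si128(acc);         // xm8
    __m128i hi = _mm256_extracti128_si256(acc, 1);    // vextracti128 xm0, m8, 1
    __m128i s  = _mm_add_epi32(lo, hi);               // paddd xm0, xm8
    s = _mm_add_epi32(s, _mm_unpackhi_epi64(s, s));   // movhlps + paddd
    s = _mm_add_epi32(s, _mm_shuffle_epi32(s, _MM_SHUFFLE(0, 0, 0, 1))); // pshuflw + paddd
    return (uint32_t)_mm_cvtsi128_si32(s);            // movd eax, xm0
}

The asm's pshuflw q0032 only shuffles the low four words, which is enough because
after the movhlps fold the remaining partial sums sit in dwords 0 and 1; the dword
shuffle above expresses the same final fold.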