[x265] [PATCH] asm: avx2 code for sad_x3_64xN, improved by over 40% over SSE
dnyaneshwar at multicorewareinc.com
dnyaneshwar at multicorewareinc.com
Fri Sep 25 06:54:29 CEST 2015
# HG changeset patch
# User Dnyaneshwar G <dnyaneshwar at multicorewareinc.com>
# Date 1443156630 -19800
# Fri Sep 25 10:20:30 2015 +0530
# Node ID d890ce2af3de2f1c23f5bf07fef6471417b7f8ef
# Parent 310d35ed0ba85174676d0b0bb91e6b8b5f475726
asm: avx2 code for sad_x3_64xN, improved by over 40% over SSE
diff -r 310d35ed0ba8 -r d890ce2af3de source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp Fri Sep 25 10:19:11 2015 +0530
+++ b/source/common/x86/asm-primitives.cpp Fri Sep 25 10:20:30 2015 +0530
@@ -3592,6 +3592,11 @@
p.pu[LUMA_32x24].sad_x3 = PFX(pixel_sad_x3_32x24_avx2);
p.pu[LUMA_32x32].sad_x3 = PFX(pixel_sad_x3_32x32_avx2);
p.pu[LUMA_32x64].sad_x3 = PFX(pixel_sad_x3_32x64_avx2);
+ p.pu[LUMA_64x16].sad_x3 = PFX(pixel_sad_x3_64x16_avx2);
+ p.pu[LUMA_64x32].sad_x3 = PFX(pixel_sad_x3_64x32_avx2);
+ p.pu[LUMA_64x48].sad_x3 = PFX(pixel_sad_x3_64x48_avx2);
+ p.pu[LUMA_64x64].sad_x3 = PFX(pixel_sad_x3_64x64_avx2);
+ p.pu[LUMA_48x64].sad_x3 = PFX(pixel_sad_x3_48x64_avx2);
/* The following primitives have been disabled since performance compared to SSE is negligible/negative */
#if 0
diff -r 310d35ed0ba8 -r d890ce2af3de source/common/x86/sad-a.asm
--- a/source/common/x86/sad-a.asm Fri Sep 25 10:19:11 2015 +0530
+++ b/source/common/x86/sad-a.asm Fri Sep 25 10:20:30 2015 +0530
@@ -4779,6 +4779,372 @@
paddd m2, m3
%endmacro
+%macro SAD_X3_64x8_AVX2 0 ; accumulate SADs of 8 rows x 64 pels: fenc (r0, stride FENC_STRIDE) vs 3 refs (r1/r2/r3, stride r4) into m0/m1/m2; expects r6 = r4 * 3; clobbers m3-m7 and advances r0-r3 by 4 rows
+ movu m3, [r0] ; row 0, bytes 0-31 of fenc
+ movu m4, [r1] ; row 0 of ref0
+ movu m5, [r2] ; row 0 of ref1
+ movu m6, [r3] ; row 0 of ref2
+
+ psadbw m7, m3, m4 ; per-qword SAD: fenc vs ref0
+ paddd m0, m7 ; m0 accumulates ref0 sums
+ psadbw m4, m3, m5 ; fenc vs ref1 (m4's load value is dead now)
+ paddd m1, m4 ; m1 accumulates ref1 sums
+ psadbw m3, m6 ; fenc vs ref2 last, so the fenc copy may be destroyed
+ paddd m2, m3 ; m2 accumulates ref2 sums
+
+ movu m3, [r0 + mmsize] ; row 0, bytes 32-63 (mmsize = 32 for ymm)
+ movu m4, [r1 + mmsize]
+ movu m5, [r2 + mmsize]
+ movu m6, [r3 + mmsize]
+
+ psadbw m7, m3, m4
+ paddd m0, m7
+ psadbw m4, m3, m5
+ paddd m1, m4
+ psadbw m3, m6
+ paddd m2, m3
+
+ movu m3, [r0 + FENC_STRIDE] ; row 1, bytes 0-31
+ movu m4, [r1 + r4]
+ movu m5, [r2 + r4]
+ movu m6, [r3 + r4]
+
+ psadbw m7, m3, m4
+ paddd m0, m7
+ psadbw m4, m3, m5
+ paddd m1, m4
+ psadbw m3, m6
+ paddd m2, m3
+
+ movu m3, [r0 + FENC_STRIDE + mmsize] ; row 1, bytes 32-63
+ movu m4, [r1 + r4 + mmsize]
+ movu m5, [r2 + r4 + mmsize]
+ movu m6, [r3 + r4 + mmsize]
+
+ psadbw m7, m3, m4
+ paddd m0, m7
+ psadbw m4, m3, m5
+ paddd m1, m4
+ psadbw m3, m6
+ paddd m2, m3
+
+ movu m3, [r0 + FENC_STRIDE * 2] ; row 2, bytes 0-31
+ movu m4, [r1 + r4 * 2]
+ movu m5, [r2 + r4 * 2]
+ movu m6, [r3 + r4 * 2]
+
+ psadbw m7, m3, m4
+ paddd m0, m7
+ psadbw m4, m3, m5
+ paddd m1, m4
+ psadbw m3, m6
+ paddd m2, m3
+
+ movu m3, [r0 + FENC_STRIDE * 2 + mmsize] ; row 2, bytes 32-63
+ movu m4, [r1 + r4 * 2 + mmsize]
+ movu m5, [r2 + r4 * 2 + mmsize]
+ movu m6, [r3 + r4 * 2 + mmsize]
+
+ psadbw m7, m3, m4
+ paddd m0, m7
+ psadbw m4, m3, m5
+ paddd m1, m4
+ psadbw m3, m6
+ paddd m2, m3
+
+ movu m3, [r0 + FENC_STRIDE * 3] ; row 3, bytes 0-31 (r6 = r4 * 3 avoids a non-encodable *3 scale)
+ movu m4, [r1 + r6]
+ movu m5, [r2 + r6]
+ movu m6, [r3 + r6]
+
+ psadbw m7, m3, m4
+ paddd m0, m7
+ psadbw m4, m3, m5
+ paddd m1, m4
+ psadbw m3, m6
+ paddd m2, m3
+
+ movu m3, [r0 + FENC_STRIDE * 3 + mmsize] ; row 3, bytes 32-63
+ movu m4, [r1 + r6 + mmsize]
+ movu m5, [r2 + r6 + mmsize]
+ movu m6, [r3 + r6 + mmsize]
+
+ psadbw m7, m3, m4
+ paddd m0, m7
+ psadbw m4, m3, m5
+ paddd m1, m4
+ psadbw m3, m6
+ paddd m2, m3
+
+ add r0, FENC_STRIDE * 4 ; step all four pointers down 4 rows; rows 4-7 reuse the row 0-3 addressing below
+ lea r1, [r1 + r4 * 4]
+ lea r2, [r2 + r4 * 4]
+ lea r3, [r3 + r4 * 4]
+
+ movu m3, [r0] ; row 4, bytes 0-31
+ movu m4, [r1]
+ movu m5, [r2]
+ movu m6, [r3]
+
+ psadbw m7, m3, m4
+ paddd m0, m7
+ psadbw m4, m3, m5
+ paddd m1, m4
+ psadbw m3, m6
+ paddd m2, m3
+
+ movu m3, [r0 + mmsize] ; row 4, bytes 32-63
+ movu m4, [r1 + mmsize]
+ movu m5, [r2 + mmsize]
+ movu m6, [r3 + mmsize]
+
+ psadbw m7, m3, m4
+ paddd m0, m7
+ psadbw m4, m3, m5
+ paddd m1, m4
+ psadbw m3, m6
+ paddd m2, m3
+
+ movu m3, [r0 + FENC_STRIDE] ; row 5, bytes 0-31
+ movu m4, [r1 + r4]
+ movu m5, [r2 + r4]
+ movu m6, [r3 + r4]
+
+ psadbw m7, m3, m4
+ paddd m0, m7
+ psadbw m4, m3, m5
+ paddd m1, m4
+ psadbw m3, m6
+ paddd m2, m3
+
+ movu m3, [r0 + FENC_STRIDE + mmsize] ; row 5, bytes 32-63
+ movu m4, [r1 + r4 + mmsize]
+ movu m5, [r2 + r4 + mmsize]
+ movu m6, [r3 + r4 + mmsize]
+
+ psadbw m7, m3, m4
+ paddd m0, m7
+ psadbw m4, m3, m5
+ paddd m1, m4
+ psadbw m3, m6
+ paddd m2, m3
+
+ movu m3, [r0 + FENC_STRIDE * 2] ; row 6, bytes 0-31
+ movu m4, [r1 + r4 * 2]
+ movu m5, [r2 + r4 * 2]
+ movu m6, [r3 + r4 * 2]
+
+ psadbw m7, m3, m4
+ paddd m0, m7
+ psadbw m4, m3, m5
+ paddd m1, m4
+ psadbw m3, m6
+ paddd m2, m3
+
+ movu m3, [r0 + FENC_STRIDE * 2 + mmsize] ; row 6, bytes 32-63
+ movu m4, [r1 + r4 * 2 + mmsize]
+ movu m5, [r2 + r4 * 2 + mmsize]
+ movu m6, [r3 + r4 * 2 + mmsize]
+
+ psadbw m7, m3, m4
+ paddd m0, m7
+ psadbw m4, m3, m5
+ paddd m1, m4
+ psadbw m3, m6
+ paddd m2, m3
+
+ movu m3, [r0 + FENC_STRIDE * 3] ; row 7, bytes 0-31
+ movu m4, [r1 + r6]
+ movu m5, [r2 + r6]
+ movu m6, [r3 + r6]
+
+ psadbw m7, m3, m4
+ paddd m0, m7
+ psadbw m4, m3, m5
+ paddd m1, m4
+ psadbw m3, m6
+ paddd m2, m3
+
+ movu m3, [r0 + FENC_STRIDE * 3 + mmsize] ; row 7, bytes 32-63
+ movu m4, [r1 + r6 + mmsize]
+ movu m5, [r2 + r6 + mmsize]
+ movu m6, [r3 + r6 + mmsize]
+
+ psadbw m7, m3, m4
+ paddd m0, m7
+ psadbw m4, m3, m5
+ paddd m1, m4
+ psadbw m3, m6
+ paddd m2, m3
+%endmacro
+
+%macro SAD_X3_48x8_AVX2 0 ; accumulate SADs of 8 rows x 48 pels into m0/m1/m2; the 16-byte row tails of two adjacent rows are paired into one ymm (vinserti128) so every psadbw runs on a full 32-byte vector; expects r6 = r4 * 3; clobbers m3-m7 and advances r0-r3 by 4 rows
+ movu m3, [r0] ; row 0, bytes 0-31 of fenc
+ movu m4, [r1] ; row 0 of ref0
+ movu m5, [r2] ; row 0 of ref1
+ movu m6, [r3] ; row 0 of ref2
+
+ psadbw m7, m3, m4 ; fenc vs ref0
+ paddd m0, m7 ; m0 accumulates ref0 sums
+ psadbw m4, m3, m5 ; fenc vs ref1
+ paddd m1, m4 ; m1 accumulates ref1 sums
+ psadbw m3, m6 ; fenc vs ref2 last, so the fenc copy may be destroyed
+ paddd m2, m3 ; m2 accumulates ref2 sums
+
+ movu xm3, [r0 + mmsize] ; low lane: row 0, bytes 32-47 (the 16-byte tail)
+ movu xm4, [r1 + mmsize]
+ movu xm5, [r2 + mmsize]
+ movu xm6, [r3 + mmsize]
+ vinserti128 m3, m3, [r0 + FENC_STRIDE], 1 ; high lane: row 1, bytes 0-15 -> one full ymm of valid pels
+ vinserti128 m4, m4, [r1 + r4], 1
+ vinserti128 m5, m5, [r2 + r4], 1
+ vinserti128 m6, m6, [r3 + r4], 1
+
+ psadbw m7, m3, m4
+ paddd m0, m7
+ psadbw m4, m3, m5
+ paddd m1, m4
+ psadbw m3, m6
+ paddd m2, m3
+
+ movu m3, [r0 + FENC_STRIDE + mmsize/2] ; row 1, bytes 16-47 (bytes 0-15 were covered above)
+ movu m4, [r1 + r4 + mmsize/2]
+ movu m5, [r2 + r4 + mmsize/2]
+ movu m6, [r3 + r4 + mmsize/2]
+
+ psadbw m7, m3, m4
+ paddd m0, m7
+ psadbw m4, m3, m5
+ paddd m1, m4
+ psadbw m3, m6
+ paddd m2, m3
+
+ movu m3, [r0 + FENC_STRIDE * 2] ; row 2, bytes 0-31
+ movu m4, [r1 + r4 * 2]
+ movu m5, [r2 + r4 * 2]
+ movu m6, [r3 + r4 * 2]
+
+ psadbw m7, m3, m4
+ paddd m0, m7
+ psadbw m4, m3, m5
+ paddd m1, m4
+ psadbw m3, m6
+ paddd m2, m3
+
+ movu xm3, [r0 + FENC_STRIDE * 2 + mmsize] ; low lane: row 2, bytes 32-47
+ movu xm4, [r1 + r4 * 2 + mmsize]
+ movu xm5, [r2 + r4 * 2 + mmsize]
+ movu xm6, [r3 + r4 * 2 + mmsize]
+ vinserti128 m3, m3, [r0 + FENC_STRIDE * 3], 1 ; high lane: row 3, bytes 0-15 (r6 = r4 * 3)
+ vinserti128 m4, m4, [r1 + r6], 1
+ vinserti128 m5, m5, [r2 + r6], 1
+ vinserti128 m6, m6, [r3 + r6], 1
+
+ psadbw m7, m3, m4
+ paddd m0, m7
+ psadbw m4, m3, m5
+ paddd m1, m4
+ psadbw m3, m6
+ paddd m2, m3
+
+ movu m3, [r0 + FENC_STRIDE * 3 + mmsize/2] ; row 3, bytes 16-47
+ movu m4, [r1 + r6 + mmsize/2]
+ movu m5, [r2 + r6 + mmsize/2]
+ movu m6, [r3 + r6 + mmsize/2]
+
+ psadbw m7, m3, m4
+ paddd m0, m7
+ psadbw m4, m3, m5
+ paddd m1, m4
+ psadbw m3, m6
+ paddd m2, m3
+
+ add r0, FENC_STRIDE * 4 ; step all four pointers down 4 rows; rows 4-7 reuse the row 0-3 addressing below
+ lea r1, [r1 + r4 * 4]
+ lea r2, [r2 + r4 * 4]
+ lea r3, [r3 + r4 * 4]
+
+ movu m3, [r0] ; row 4, bytes 0-31
+ movu m4, [r1]
+ movu m5, [r2]
+ movu m6, [r3]
+
+ psadbw m7, m3, m4
+ paddd m0, m7
+ psadbw m4, m3, m5
+ paddd m1, m4
+ psadbw m3, m6
+ paddd m2, m3
+
+ movu xm3, [r0 + mmsize] ; low lane: row 4, bytes 32-47
+ movu xm4, [r1 + mmsize]
+ movu xm5, [r2 + mmsize]
+ movu xm6, [r3 + mmsize]
+ vinserti128 m3, m3, [r0 + FENC_STRIDE], 1 ; high lane: row 5, bytes 0-15
+ vinserti128 m4, m4, [r1 + r4], 1
+ vinserti128 m5, m5, [r2 + r4], 1
+ vinserti128 m6, m6, [r3 + r4], 1
+
+ psadbw m7, m3, m4
+ paddd m0, m7
+ psadbw m4, m3, m5
+ paddd m1, m4
+ psadbw m3, m6
+ paddd m2, m3
+
+ movu m3, [r0 + FENC_STRIDE + mmsize/2] ; row 5, bytes 16-47
+ movu m4, [r1 + r4 + mmsize/2]
+ movu m5, [r2 + r4 + mmsize/2]
+ movu m6, [r3 + r4 + mmsize/2]
+
+ psadbw m7, m3, m4
+ paddd m0, m7
+ psadbw m4, m3, m5
+ paddd m1, m4
+ psadbw m3, m6
+ paddd m2, m3
+
+ movu m3, [r0 + FENC_STRIDE * 2] ; row 6, bytes 0-31
+ movu m4, [r1 + r4 * 2]
+ movu m5, [r2 + r4 * 2]
+ movu m6, [r3 + r4 * 2]
+
+ psadbw m7, m3, m4
+ paddd m0, m7
+ psadbw m4, m3, m5
+ paddd m1, m4
+ psadbw m3, m6
+ paddd m2, m3
+
+ movu xm3, [r0 + FENC_STRIDE * 2 + mmsize] ; low lane: row 6, bytes 32-47
+ movu xm4, [r1 + r4 * 2 + mmsize]
+ movu xm5, [r2 + r4 * 2 + mmsize]
+ movu xm6, [r3 + r4 * 2 + mmsize]
+ vinserti128 m3, m3, [r0 + FENC_STRIDE * 3], 1 ; high lane: row 7, bytes 0-15
+ vinserti128 m4, m4, [r1 + r6], 1
+ vinserti128 m5, m5, [r2 + r6], 1
+ vinserti128 m6, m6, [r3 + r6], 1
+
+ psadbw m7, m3, m4
+ paddd m0, m7
+ psadbw m4, m3, m5
+ paddd m1, m4
+ psadbw m3, m6
+ paddd m2, m3
+
+ movu m3, [r0 + FENC_STRIDE * 3 + mmsize/2] ; row 7, bytes 16-47
+ movu m4, [r1 + r6 + mmsize/2]
+ movu m5, [r2 + r6 + mmsize/2]
+ movu m6, [r3 + r6 + mmsize/2]
+
+ psadbw m7, m3, m4
+ paddd m0, m7
+ psadbw m4, m3, m5
+ paddd m1, m4
+ psadbw m3, m6
+ paddd m2, m3
+%endmacro
+
%macro PIXEL_SAD_X3_END_AVX2 0
vextracti128 xm3, m0, 1
vextracti128 xm4, m1, 1
@@ -4938,6 +5304,217 @@
SAD_X3_32x8_AVX2
PIXEL_SAD_X3_END_AVX2
RET
+
+cglobal pixel_sad_x3_64x16, 6,7,8 ; sad_x3(fenc, ref0, ref1, ref2, frefstride, res): r0 = fenc, r1-r3 = refs, r4 = ref stride, r5 = result ptr; 6 args, 7 gprs, 8 xmm/ymm
+ pxor m0, m0 ; SAD accumulator for ref0
+ pxor m1, m1 ; SAD accumulator for ref1
+ pxor m2, m2 ; SAD accumulator for ref2
+ lea r6, [r4 * 3] ; r6 = 3 * ref stride, needed by the 8-row macro
+
+ SAD_X3_64x8_AVX2 ; rows 0-7 (leaves pointers advanced 4 rows)
+
+ add r0, FENC_STRIDE * 4 ; step another 4 rows so pointers sit at row 8
+ lea r1, [r1 + r4 * 4]
+ lea r2, [r2 + r4 * 4]
+ lea r3, [r3 + r4 * 4]
+
+ SAD_X3_64x8_AVX2 ; rows 8-15
+ PIXEL_SAD_X3_END_AVX2 ; reduce m0/m1/m2 and store the three sums via r5
+ RET
+
+cglobal pixel_sad_x3_64x32, 6,7,8 ; sad_x3(fenc, ref0, ref1, ref2, frefstride, res): r0 = fenc, r1-r3 = refs, r4 = ref stride, r5 = result ptr
+ pxor m0, m0 ; SAD accumulator for ref0
+ pxor m1, m1 ; SAD accumulator for ref1
+ pxor m2, m2 ; SAD accumulator for ref2
+ lea r6, [r4 * 3] ; r6 = 3 * ref stride, needed by the 8-row macro
+
+ SAD_X3_64x8_AVX2 ; rows 0-7 (each macro leaves pointers 4 rows in, hence the extra +4 steps between calls)
+
+ add r0, FENC_STRIDE * 4
+ lea r1, [r1 + r4 * 4]
+ lea r2, [r2 + r4 * 4]
+ lea r3, [r3 + r4 * 4]
+
+ SAD_X3_64x8_AVX2 ; rows 8-15
+
+ add r0, FENC_STRIDE * 4
+ lea r1, [r1 + r4 * 4]
+ lea r2, [r2 + r4 * 4]
+ lea r3, [r3 + r4 * 4]
+
+ SAD_X3_64x8_AVX2 ; rows 16-23
+
+ add r0, FENC_STRIDE * 4
+ lea r1, [r1 + r4 * 4]
+ lea r2, [r2 + r4 * 4]
+ lea r3, [r3 + r4 * 4]
+
+ SAD_X3_64x8_AVX2 ; rows 24-31
+ PIXEL_SAD_X3_END_AVX2 ; reduce m0/m1/m2 and store the three sums via r5
+ RET
+
+cglobal pixel_sad_x3_64x48, 6,7,8 ; sad_x3(fenc, ref0, ref1, ref2, frefstride, res): r0 = fenc, r1-r3 = refs, r4 = ref stride, r5 = result ptr
+ pxor m0, m0 ; SAD accumulator for ref0
+ pxor m1, m1 ; SAD accumulator for ref1
+ pxor m2, m2 ; SAD accumulator for ref2
+ lea r6, [r4 * 3] ; r6 = 3 * ref stride, needed by the 8-row macro
+
+ SAD_X3_64x8_AVX2 ; rows 0-7 (each macro leaves pointers 4 rows in, hence the extra +4 steps between calls)
+
+ add r0, FENC_STRIDE * 4
+ lea r1, [r1 + r4 * 4]
+ lea r2, [r2 + r4 * 4]
+ lea r3, [r3 + r4 * 4]
+
+ SAD_X3_64x8_AVX2 ; rows 8-15
+
+ add r0, FENC_STRIDE * 4
+ lea r1, [r1 + r4 * 4]
+ lea r2, [r2 + r4 * 4]
+ lea r3, [r3 + r4 * 4]
+
+ SAD_X3_64x8_AVX2 ; rows 16-23
+
+ add r0, FENC_STRIDE * 4
+ lea r1, [r1 + r4 * 4]
+ lea r2, [r2 + r4 * 4]
+ lea r3, [r3 + r4 * 4]
+
+ SAD_X3_64x8_AVX2 ; rows 24-31
+
+ add r0, FENC_STRIDE * 4
+ lea r1, [r1 + r4 * 4]
+ lea r2, [r2 + r4 * 4]
+ lea r3, [r3 + r4 * 4]
+
+ SAD_X3_64x8_AVX2 ; rows 32-39
+
+ add r0, FENC_STRIDE * 4
+ lea r1, [r1 + r4 * 4]
+ lea r2, [r2 + r4 * 4]
+ lea r3, [r3 + r4 * 4]
+
+ SAD_X3_64x8_AVX2 ; rows 40-47
+ PIXEL_SAD_X3_END_AVX2 ; reduce m0/m1/m2 and store the three sums via r5
+ RET
+
+cglobal pixel_sad_x3_64x64, 6,7,8 ; sad_x3(fenc, ref0, ref1, ref2, frefstride, res): r0 = fenc, r1-r3 = refs, r4 = ref stride, r5 = result ptr
+ pxor m0, m0 ; SAD accumulator for ref0
+ pxor m1, m1 ; SAD accumulator for ref1
+ pxor m2, m2 ; SAD accumulator for ref2
+ lea r6, [r4 * 3] ; r6 = 3 * ref stride, needed by the 8-row macro
+
+ SAD_X3_64x8_AVX2 ; rows 0-7 (each macro leaves pointers 4 rows in, hence the extra +4 steps between calls)
+
+ add r0, FENC_STRIDE * 4
+ lea r1, [r1 + r4 * 4]
+ lea r2, [r2 + r4 * 4]
+ lea r3, [r3 + r4 * 4]
+
+ SAD_X3_64x8_AVX2 ; rows 8-15
+
+ add r0, FENC_STRIDE * 4
+ lea r1, [r1 + r4 * 4]
+ lea r2, [r2 + r4 * 4]
+ lea r3, [r3 + r4 * 4]
+
+ SAD_X3_64x8_AVX2 ; rows 16-23
+
+ add r0, FENC_STRIDE * 4
+ lea r1, [r1 + r4 * 4]
+ lea r2, [r2 + r4 * 4]
+ lea r3, [r3 + r4 * 4]
+
+ SAD_X3_64x8_AVX2 ; rows 24-31
+
+ add r0, FENC_STRIDE * 4
+ lea r1, [r1 + r4 * 4]
+ lea r2, [r2 + r4 * 4]
+ lea r3, [r3 + r4 * 4]
+
+ SAD_X3_64x8_AVX2 ; rows 32-39
+
+ add r0, FENC_STRIDE * 4
+ lea r1, [r1 + r4 * 4]
+ lea r2, [r2 + r4 * 4]
+ lea r3, [r3 + r4 * 4]
+
+ SAD_X3_64x8_AVX2 ; rows 40-47
+
+ add r0, FENC_STRIDE * 4
+ lea r1, [r1 + r4 * 4]
+ lea r2, [r2 + r4 * 4]
+ lea r3, [r3 + r4 * 4]
+
+ SAD_X3_64x8_AVX2 ; rows 48-55
+
+ add r0, FENC_STRIDE * 4
+ lea r1, [r1 + r4 * 4]
+ lea r2, [r2 + r4 * 4]
+ lea r3, [r3 + r4 * 4]
+
+ SAD_X3_64x8_AVX2 ; rows 56-63
+ PIXEL_SAD_X3_END_AVX2 ; reduce m0/m1/m2 and store the three sums via r5
+ RET
+
+cglobal pixel_sad_x3_48x64, 6,7,8 ; sad_x3(fenc, ref0, ref1, ref2, frefstride, res): r0 = fenc, r1-r3 = refs, r4 = ref stride, r5 = result ptr
+ pxor m0, m0 ; SAD accumulator for ref0
+ pxor m1, m1 ; SAD accumulator for ref1
+ pxor m2, m2 ; SAD accumulator for ref2
+ lea r6, [r4 * 3] ; r6 = 3 * ref stride, needed by the 8-row macro
+
+ SAD_X3_48x8_AVX2 ; rows 0-7 (each macro leaves pointers 4 rows in, hence the extra +4 steps between calls)
+
+ add r0, FENC_STRIDE * 4
+ lea r1, [r1 + r4 * 4]
+ lea r2, [r2 + r4 * 4]
+ lea r3, [r3 + r4 * 4]
+
+ SAD_X3_48x8_AVX2 ; rows 8-15
+
+ add r0, FENC_STRIDE * 4
+ lea r1, [r1 + r4 * 4]
+ lea r2, [r2 + r4 * 4]
+ lea r3, [r3 + r4 * 4]
+
+ SAD_X3_48x8_AVX2 ; rows 16-23
+
+ add r0, FENC_STRIDE * 4
+ lea r1, [r1 + r4 * 4]
+ lea r2, [r2 + r4 * 4]
+ lea r3, [r3 + r4 * 4]
+
+ SAD_X3_48x8_AVX2 ; rows 24-31
+
+ add r0, FENC_STRIDE * 4
+ lea r1, [r1 + r4 * 4]
+ lea r2, [r2 + r4 * 4]
+ lea r3, [r3 + r4 * 4]
+
+ SAD_X3_48x8_AVX2 ; rows 32-39
+
+ add r0, FENC_STRIDE * 4
+ lea r1, [r1 + r4 * 4]
+ lea r2, [r2 + r4 * 4]
+ lea r3, [r3 + r4 * 4]
+
+ SAD_X3_48x8_AVX2 ; rows 40-47
+
+ add r0, FENC_STRIDE * 4
+ lea r1, [r1 + r4 * 4]
+ lea r2, [r2 + r4 * 4]
+ lea r3, [r3 + r4 * 4]
+
+ SAD_X3_48x8_AVX2 ; rows 48-55
+
+ add r0, FENC_STRIDE * 4
+ lea r1, [r1 + r4 * 4]
+ lea r2, [r2 + r4 * 4]
+ lea r3, [r3 + r4 * 4]
+
+ SAD_X3_48x8_AVX2 ; rows 56-63
+ PIXEL_SAD_X3_END_AVX2 ; reduce m0/m1/m2 and store the three sums via r5
+ RET
%endif
INIT_YMM avx2
More information about the x265-devel
mailing list