[x265] [PATCH] asm: assembly code for pixel_sad_x3_48x64
yuvaraj at multicorewareinc.com
Thu Nov 7 07:56:39 CET 2013
# HG changeset patch
# User Yuvaraj Venkatesh <yuvaraj at multicorewareinc.com>
# Date 1383807314 -19800
# Thu Nov 07 12:25:14 2013 +0530
# Node ID 35e1a478048f9fd06b73c461cdea2d7abfe83673
# Parent 93cccbe49a93dd4c054ef06aca76974948793613
asm: assembly code for pixel_sad_x3_48x64
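
For context: the sad_x3 primitives compute three SADs at once, comparing a single 48x64 fenc block against three candidate blocks that share one reference stride, and writing the three sums to scores[3] (the signature matches the pixel_sad_x3_16x16 comment in sad-a.asm). A minimal scalar sketch of the behaviour follows; sad_x3_48x64_c is a hypothetical reference name, and FENC_STRIDE = 64 is an assumption standing in for the fixed fenc-buffer stride the asm relies on:

    #include <stdint.h>
    #include <stdlib.h>

    #define FENC_STRIDE 64  /* assumed fixed stride of the fenc buffer */

    static void sad_x3_48x64_c(const uint8_t *fenc,
                               const uint8_t *pix0, const uint8_t *pix1,
                               const uint8_t *pix2, intptr_t i_stride,
                               int scores[3])
    {
        scores[0] = scores[1] = scores[2] = 0;
        for (int y = 0; y < 64; y++)
        {
            for (int x = 0; x < 48; x++)
            {
                scores[0] += abs(fenc[x] - pix0[x]);
                scores[1] += abs(fenc[x] - pix1[x]);
                scores[2] += abs(fenc[x] - pix2[x]);
            }
            fenc += FENC_STRIDE;  /* fixed stride, hence the aligned mova loads */
            pix0 += i_stride;     /* the candidates share i_stride (r4 below) */
            pix1 += i_stride;
            pix2 += i_stride;
        }
    }

The asm below unrolls this: SAD_X3_48x4 covers four rows, loading each row as three 16-byte columns (mova for the aligned fenc, movu for the references) and folding the psadbw results into accumulators m0-m2, while SAD_X3_W48 runs that macro four times per loop iteration so r6 counts the 64 rows down in steps of 16.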
diff -r 93cccbe49a93 -r 35e1a478048f source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp Wed Nov 06 19:49:38 2013 -0600
+++ b/source/common/x86/asm-primitives.cpp Thu Nov 07 12:25:14 2013 +0530
@@ -414,6 +414,7 @@
p.sad_x4[LUMA_32x24] = x265_pixel_sad_x4_32x24_ssse3;
p.sad_x4[LUMA_32x32] = x265_pixel_sad_x4_32x32_ssse3;
p.sad_x4[LUMA_32x64] = x265_pixel_sad_x4_32x64_ssse3;
+ p.sad_x3[LUMA_48x64] = x265_pixel_sad_x3_48x64_ssse3;
p.luma_hvpp[LUMA_8x8] = x265_interp_8tap_hv_pp_8x8_ssse3;
p.ipfilter_sp[FILTER_V_S_P_8] = x265_interp_8tap_v_sp_ssse3;
@@ -468,6 +469,7 @@
p.sad_x4[LUMA_32x24] = x265_pixel_sad_x4_32x24_avx;
p.sad_x4[LUMA_32x32] = x265_pixel_sad_x4_32x32_avx;
p.sad_x4[LUMA_32x64] = x265_pixel_sad_x4_32x64_avx;
+ p.sad_x3[LUMA_48x64] = x265_pixel_sad_x3_48x64_avx;
}
if (cpuMask & X265_CPU_XOP)
{
diff -r 93cccbe49a93 -r 35e1a478048f source/common/x86/sad-a.asm
--- a/source/common/x86/sad-a.asm Wed Nov 06 19:49:38 2013 -0600
+++ b/source/common/x86/sad-a.asm Thu Nov 07 12:25:14 2013 +0530
@@ -2230,6 +2230,140 @@
lea r4, [r4 + r5]
%endmacro
+%macro SAD_X3_48x4 0
+ mova m3, [r0]
+ mova m4, [r0 + 16]
+ mova m5, [r0 + 32]
+ movu m6, [r1]
+ psadbw m6, m3
+ paddd m0, m6
+ movu m6, [r1 + 16]
+ psadbw m6, m4
+ paddd m0, m6
+ movu m6, [r1 + 32]
+ psadbw m6, m5
+ paddd m0, m6
+ movu m6, [r2]
+ psadbw m6, m3
+ paddd m1, m6
+ movu m6, [r2 + 16]
+ psadbw m6, m4
+ paddd m1, m6
+ movu m6, [r2 + 32]
+ psadbw m6, m5
+ paddd m1, m6
+ movu m6, [r3]
+ psadbw m6, m3
+ paddd m2, m6
+ movu m6, [r3 + 16]
+ psadbw m6, m4
+ paddd m2, m6
+ movu m6, [r3 + 32]
+ psadbw m6, m5
+ paddd m2, m6
+
+ mova m3, [r0 + FENC_STRIDE]
+ mova m4, [r0 + 16 + FENC_STRIDE]
+ mova m5, [r0 + 32 + FENC_STRIDE]
+ movu m6, [r1 + r4]
+ psadbw m6, m3
+ paddd m0, m6
+ movu m6, [r1 + 16 + r4]
+ psadbw m6, m4
+ paddd m0, m6
+ movu m6, [r1 + 32 + r4]
+ psadbw m6, m5
+ paddd m0, m6
+ movu m6, [r2 + r4]
+ psadbw m6, m3
+ paddd m1, m6
+ movu m6, [r2 + 16 + r4]
+ psadbw m6, m4
+ paddd m1, m6
+ movu m6, [r2 + 32 + r4]
+ psadbw m6, m5
+ paddd m1, m6
+ movu m6, [r3 + r4]
+ psadbw m6, m3
+ paddd m2, m6
+ movu m6, [r3 + 16 + r4]
+ psadbw m6, m4
+ paddd m2, m6
+ movu m6, [r3 + 32 + r4]
+ psadbw m6, m5
+ paddd m2, m6
+
+ mova m3, [r0 + FENC_STRIDE * 2]
+ mova m4, [r0 + 16 + FENC_STRIDE * 2]
+ mova m5, [r0 + 32 + FENC_STRIDE * 2]
+ movu m6, [r1 + r4 * 2]
+ psadbw m6, m3
+ paddd m0, m6
+ movu m6, [r1 + 16 + r4 * 2]
+ psadbw m6, m4
+ paddd m0, m6
+ movu m6, [r1 + 32 + r4 * 2]
+ psadbw m6, m5
+ paddd m0, m6
+ movu m6, [r2 + r4 * 2]
+ psadbw m6, m3
+ paddd m1, m6
+ movu m6, [r2 + 16 + r4 * 2]
+ psadbw m6, m4
+ paddd m1, m6
+ movu m6, [r2 + 32 + r4 * 2]
+ psadbw m6, m5
+ paddd m1, m6
+ movu m6, [r3 + r4 * 2]
+ psadbw m6, m3
+ paddd m2, m6
+ movu m6, [r3 + 16 + r4 * 2]
+ psadbw m6, m4
+ paddd m2, m6
+ movu m6, [r3 + 32 + r4 * 2]
+ psadbw m6, m5
+ paddd m2, m6
+
+ lea r0, [r0 + FENC_STRIDE * 2]
+ lea r1, [r1 + r4 * 2]
+ lea r2, [r2 + r4 * 2]
+ lea r3, [r3 + r4 * 2]
+ mova m3, [r0 + FENC_STRIDE]
+ mova m4, [r0 + 16 + FENC_STRIDE]
+ mova m5, [r0 + 32 + FENC_STRIDE]
+ movu m6, [r1 + r4]
+ psadbw m6, m3
+ paddd m0, m6
+ movu m6, [r1 + 16 + r4]
+ psadbw m6, m4
+ paddd m0, m6
+ movu m6, [r1 + 32 + r4]
+ psadbw m6, m5
+ paddd m0, m6
+ movu m6, [r2 + r4]
+ psadbw m6, m3
+ paddd m1, m6
+ movu m6, [r2 + 16 + r4]
+ psadbw m6, m4
+ paddd m1, m6
+ movu m6, [r2 + 32 + r4]
+ psadbw m6, m5
+ paddd m1, m6
+ movu m6, [r3 + r4]
+ psadbw m6, m3
+ paddd m2, m6
+ movu m6, [r3 + 16 + r4]
+ psadbw m6, m4
+ paddd m2, m6
+ movu m6, [r3 + 32 + r4]
+ psadbw m6, m5
+ paddd m2, m6
+ lea r0, [r0 + FENC_STRIDE * 2]
+ lea r1, [r1 + r4 * 2]
+ lea r2, [r2 + r4 * 2]
+ lea r3, [r3 + r4 * 2]
+%endmacro
+
;-----------------------------------------------------------------------------
; void pixel_sad_x3_16x16( uint8_t *fenc, uint8_t *pix0, uint8_t *pix1,
; uint8_t *pix2, intptr_t i_stride, int scores[3] )
@@ -3001,6 +3135,23 @@
%endmacro
+%macro SAD_X3_W48 0
+cglobal pixel_sad_x3_48x64, 5, 7, 8
+ pxor m0, m0
+ pxor m1, m1
+ pxor m2, m2
+ mov r6, 64
+
+.loop:
+ SAD_X3_48x4
+ SAD_X3_48x4
+ SAD_X3_48x4
+ SAD_X3_48x4
+
+ sub r6, 16
+ jnz .loop
+ SAD_X3_END_SSE2 1
+%endmacro
INIT_XMM sse2
SAD_X_SSE2 3, 16, 16, 7
@@ -3036,6 +3187,7 @@
SAD_X3_W12
SAD_X3_W32
SAD_X3_W24
+SAD_X3_W48
SAD_X_SSE2 3, 16, 64, 7
SAD_X_SSE2 3, 16, 32, 7
SAD_X_SSE2 3, 16, 16, 7
@@ -3060,6 +3212,7 @@
SAD_X3_W12
SAD_X3_W32
SAD_X3_W24
+SAD_X3_W48
SAD_X_SSE2 3, 16, 64, 7
SAD_X_SSE2 3, 16, 32, 6
SAD_X_SSE2 3, 16, 16, 6
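
A closing note on the psadbw idiom used throughout SAD_X3_48x4: each mova/movu pair loads one 16-byte column of fenc and of a candidate, psadbw yields two 64-bit lanes of partial absolute-difference sums, and paddd folds them into the accumulator. The same step expressed in SSE2 intrinsics, as an illustrative sketch only (sad_accum and sad_reduce are hypothetical helpers, not part of the patch):

    #include <emmintrin.h>
    #include <stdint.h>

    /* Mirrors one column of SAD_X3_48x4:
     *     mova   m3, [r0]   ; aligned load of fenc
     *     movu   m6, [r1]   ; unaligned load of the candidate
     *     psadbw m6, m3     ; two 64-bit lanes of partial SADs
     *     paddd  m0, m6     ; accumulate
     */
    static inline __m128i sad_accum(__m128i acc, const uint8_t *fenc,
                                    const uint8_t *pix)
    {
        __m128i f = _mm_load_si128((const __m128i *)fenc);  /* mova */
        __m128i p = _mm_loadu_si128((const __m128i *)pix);  /* movu */
        return _mm_add_epi32(acc, _mm_sad_epu8(p, f));      /* psadbw + paddd */
    }

    /* SAD_X3_END_SSE2 then reduces each accumulator by summing its two halves: */
    static inline int sad_reduce(__m128i acc)
    {
        return _mm_cvtsi128_si32(acc) +
               _mm_cvtsi128_si32(_mm_srli_si128(acc, 8));
    }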