[x265] [PATCH] asm: assembly code for pixel_sad_x3_64xN
chen
chenm003 at 163.com
Thu Nov 7 13:19:45 CET 2013
right
At 2013-11-07 19:12:33, yuvaraj at multicorewareinc.com wrote:
># HG changeset patch
># User Yuvaraj Venkatesh <yuvaraj at multicorewareinc.com>
># Date 1383822620 -19800
># Thu Nov 07 16:40:20 2013 +0530
># Node ID 519267d0fce9bf4b2f5ec52fe6ddc08a274f16bd
># Parent a8b449fb406d60fa674cfc71d7207bf0d2162175
>asm: assembly code for pixel_sad_x3_64xN
>
>diff -r a8b449fb406d -r 519267d0fce9 source/common/x86/asm-primitives.cpp
>--- a/source/common/x86/asm-primitives.cpp Thu Nov 07 13:07:18 2013 +0530
>+++ b/source/common/x86/asm-primitives.cpp Thu Nov 07 16:40:20 2013 +0530
>@@ -416,6 +416,10 @@
> p.sad_x4[LUMA_32x64] = x265_pixel_sad_x4_32x64_ssse3;
> p.sad_x3[LUMA_48x64] = x265_pixel_sad_x3_48x64_ssse3;
> p.sad_x4[LUMA_48x64] = x265_pixel_sad_x4_48x64_ssse3;
>+ p.sad_x3[LUMA_64x16] = x265_pixel_sad_x3_64x16_ssse3;
>+ p.sad_x3[LUMA_64x32] = x265_pixel_sad_x3_64x32_ssse3;
>+ p.sad_x3[LUMA_64x48] = x265_pixel_sad_x3_64x48_ssse3;
>+ p.sad_x3[LUMA_64x64] = x265_pixel_sad_x3_64x64_ssse3;
>
> p.luma_hvpp[LUMA_8x8] = x265_interp_8tap_hv_pp_8x8_ssse3;
> p.ipfilter_sp[FILTER_V_S_P_8] = x265_interp_8tap_v_sp_ssse3;
>@@ -472,6 +476,10 @@
> p.sad_x4[LUMA_32x64] = x265_pixel_sad_x4_32x64_avx;
> p.sad_x3[LUMA_48x64] = x265_pixel_sad_x3_48x64_avx;
> p.sad_x4[LUMA_48x64] = x265_pixel_sad_x4_48x64_avx;
>+ p.sad_x3[LUMA_64x16] = x265_pixel_sad_x3_64x16_avx;
>+ p.sad_x3[LUMA_64x32] = x265_pixel_sad_x3_64x32_avx;
>+ p.sad_x3[LUMA_64x48] = x265_pixel_sad_x3_64x48_avx;
>+ p.sad_x3[LUMA_64x64] = x265_pixel_sad_x3_64x64_avx;
> }
> if (cpuMask & X265_CPU_XOP)
> {
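For context, each sad_x3 primitive measures one fenc block against three
candidate reference blocks in a single call and writes three costs. A minimal
scalar sketch of the contract these table entries satisfy (an illustration
only, not x265's actual C reference in source/common/pixel.cpp; sad_x3_ref is
a made-up name):

    #include <cstdint>
    #include <cstdlib>

    template<int lx, int ly>  // block width and height
    void sad_x3_ref(const uint8_t* fenc, const uint8_t* fref0,
                    const uint8_t* fref1, const uint8_t* fref2,
                    intptr_t frefstride, int32_t* res)
    {
        const intptr_t FENC_STRIDE = 64;  // fenc rows use a fixed pitch
        res[0] = res[1] = res[2] = 0;
        for (int y = 0; y < ly; y++)
        {
            // one pass over the source row serves all three candidates
            for (int x = 0; x < lx; x++)
            {
                res[0] += abs(fenc[x] - fref0[x]);
                res[1] += abs(fenc[x] - fref1[x]);
                res[2] += abs(fenc[x] - fref2[x]);
            }
            fenc  += FENC_STRIDE;
            fref0 += frefstride;
            fref1 += frefstride;
            fref2 += frefstride;
        }
    }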
>diff -r a8b449fb406d -r 519267d0fce9 source/common/x86/sad-a.asm
>--- a/source/common/x86/sad-a.asm Thu Nov 07 13:07:18 2013 +0530
>+++ b/source/common/x86/sad-a.asm Thu Nov 07 16:40:20 2013 +0530
>@@ -2536,6 +2536,180 @@
> lea r4, [r4 + r5 * 2]
> %endmacro
>
>+%macro SAD_X3_64x4 0
>+ mova m3, [r0]
>+ mova m4, [r0 + 16]
>+ movu m5, [r1]
>+ psadbw m5, m3
>+ paddd m0, m5
>+ movu m5, [r1 + 16]
>+ psadbw m5, m4
>+ paddd m0, m5
>+ movu m5, [r2]
>+ psadbw m5, m3
>+ paddd m1, m5
>+ movu m5, [r2 + 16]
>+ psadbw m5, m4
>+ paddd m1, m5
>+ movu m5, [r3]
>+ psadbw m5, m3
>+ paddd m2, m5
>+ movu m5, [r3 + 16]
>+ psadbw m5, m4
>+ paddd m2, m5
>+ mova m3, [r0 + 32]
>+ mova m4, [r0 + 48]
>+ movu m5, [r1 + 32]
>+ psadbw m5, m3
>+ paddd m0, m5
>+ movu m5, [r1 + 48]
>+ psadbw m5, m4
>+ paddd m0, m5
>+ movu m5, [r2 + 32]
>+ psadbw m5, m3
>+ paddd m1, m5
>+ movu m5, [r2 + 48]
>+ psadbw m5, m4
>+ paddd m1, m5
>+ movu m5, [r3 + 32]
>+ psadbw m5, m3
>+ paddd m2, m5
>+ movu m5, [r3 + 48]
>+ psadbw m5, m4
>+ paddd m2, m5
>+
>+ mova m3, [r0 + FENC_STRIDE]
>+ mova m4, [r0 + 16 + FENC_STRIDE]
>+ movu m5, [r1 + r4]
>+ psadbw m5, m3
>+ paddd m0, m5
>+ movu m5, [r1 + 16 + r4]
>+ psadbw m5, m4
>+ paddd m0, m5
>+ movu m5, [r2 + r4]
>+ psadbw m5, m3
>+ paddd m1, m5
>+ movu m5, [r2 + 16 + r4]
>+ psadbw m5, m4
>+ paddd m1, m5
>+ movu m5, [r3 + r4]
>+ psadbw m5, m3
>+ paddd m2, m5
>+ movu m5, [r3 + 16 + r4]
>+ psadbw m5, m4
>+ paddd m2, m5
>+ mova m3, [r0 + 32 + FENC_STRIDE]
>+ mova m4, [r0 + 48 + FENC_STRIDE]
>+ movu m5, [r1 + 32 + r4]
>+ psadbw m5, m3
>+ paddd m0, m5
>+ movu m5, [r1 + 48 + r4]
>+ psadbw m5, m4
>+ paddd m0, m5
>+ movu m5, [r2 + 32 + r4]
>+ psadbw m5, m3
>+ paddd m1, m5
>+ movu m5, [r2 + 48 + r4]
>+ psadbw m5, m4
>+ paddd m1, m5
>+ movu m5, [r3 + 32 + r4]
>+ psadbw m5, m3
>+ paddd m2, m5
>+ movu m5, [r3 + 48 + r4]
>+ psadbw m5, m4
>+ paddd m2, m5
>+
>+ mova m3, [r0 + FENC_STRIDE * 2]
>+ mova m4, [r0 + 16 + FENC_STRIDE * 2]
>+ movu m5, [r1 + r4 * 2]
>+ psadbw m5, m3
>+ paddd m0, m5
>+ movu m5, [r1 + 16 + r4 * 2]
>+ psadbw m5, m4
>+ paddd m0, m5
>+ movu m5, [r2 + r4 * 2]
>+ psadbw m5, m3
>+ paddd m1, m5
>+ movu m5, [r2 + 16 + r4 * 2]
>+ psadbw m5, m4
>+ paddd m1, m5
>+ movu m5, [r3 + r4 * 2]
>+ psadbw m5, m3
>+ paddd m2, m5
>+ movu m5, [r3 + 16 + r4 * 2]
>+ psadbw m5, m4
>+ paddd m2, m5
>+ mova m3, [r0 + 32 + FENC_STRIDE * 2]
>+ mova m4, [r0 + 48 + FENC_STRIDE * 2]
>+ movu m5, [r1 + 32 + r4 * 2]
>+ psadbw m5, m3
>+ paddd m0, m5
>+ movu m5, [r1 + 48 + r4 * 2]
>+ psadbw m5, m4
>+ paddd m0, m5
>+ movu m5, [r2 + 32 + r4 * 2]
>+ psadbw m5, m3
>+ paddd m1, m5
>+ movu m5, [r2 + 48 + r4 * 2]
>+ psadbw m5, m4
>+ paddd m1, m5
>+ movu m5, [r3 + 32 + r4 * 2]
>+ psadbw m5, m3
>+ paddd m2, m5
>+ movu m5, [r3 + 48 + r4 * 2]
>+ psadbw m5, m4
>+ paddd m2, m5
>+
>+ lea r0, [r0 + FENC_STRIDE * 2]
>+ lea r1, [r1 + r4 * 2]
>+ lea r2, [r2 + r4 * 2]
>+ lea r3, [r3 + r4 * 2]
>+ mova m3, [r0 + FENC_STRIDE]
>+ mova m4, [r0 + 16 + FENC_STRIDE]
>+ movu m5, [r1 + r4]
>+ psadbw m5, m3
>+ paddd m0, m5
>+ movu m5, [r1 + 16 + r4]
>+ psadbw m5, m4
>+ paddd m0, m5
>+ movu m5, [r2 + r4]
>+ psadbw m5, m3
>+ paddd m1, m5
>+ movu m5, [r2 + 16 + r4]
>+ psadbw m5, m4
>+ paddd m1, m5
>+ movu m5, [r3 + r4]
>+ psadbw m5, m3
>+ paddd m2, m5
>+ movu m5, [r3 + 16 + r4]
>+ psadbw m5, m4
>+ paddd m2, m5
>+ mova m3, [r0 + 32 + FENC_STRIDE]
>+ mova m4, [r0 + 48 + FENC_STRIDE]
>+ movu m5, [r1 + 32 + r4]
>+ psadbw m5, m3
>+ paddd m0, m5
>+ movu m5, [r1 + 48 + r4]
>+ psadbw m5, m4
>+ paddd m0, m5
>+ movu m5, [r2 + 32 + r4]
>+ psadbw m5, m3
>+ paddd m1, m5
>+ movu m5, [r2 + 48 + r4]
>+ psadbw m5, m4
>+ paddd m1, m5
>+ movu m5, [r3 + 32 + r4]
>+ psadbw m5, m3
>+ paddd m2, m5
>+ movu m5, [r3 + 48 + r4]
>+ psadbw m5, m4
>+ paddd m2, m5
>+ lea r0, [r0 + FENC_STRIDE * 2]
>+ lea r1, [r1 + r4 * 2]
>+ lea r2, [r2 + r4 * 2]
>+ lea r3, [r3 + r4 * 2]
>+%endmacro
>+
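Each SAD_X3_64x4 expansion covers four rows. Within a row the 64 pixels are
handled as four 16-byte lanes: the fenc lanes are loaded once with mova (the
fenc buffer is FENC_STRIDE-pitched and aligned) into m3/m4 and reused against
all three candidates, which are fetched with movu because reference blocks
carry no alignment guarantee. One lane of the pattern, rendered as SSE2
intrinsics (a sketch only; sad_x3_lane is a made-up helper):

    #include <emmintrin.h>
    #include <stdint.h>

    // psadbw == _mm_sad_epu8: each 64-bit half of its result holds a sum of
    // 8 absolute byte differences (at most 2040), so accumulating with paddd
    // (_mm_add_epi32) cannot overflow even over a full 64x64 block.
    static inline void sad_x3_lane(const uint8_t* fenc, const uint8_t* r1,
                                   const uint8_t* r2, const uint8_t* r3,
                                   __m128i* a0, __m128i* a1, __m128i* a2)
    {
        __m128i f = _mm_load_si128((const __m128i*)fenc);              // mova
        *a0 = _mm_add_epi32(*a0,
                _mm_sad_epu8(_mm_loadu_si128((const __m128i*)r1), f)); // movu
        *a1 = _mm_add_epi32(*a1,
                _mm_sad_epu8(_mm_loadu_si128((const __m128i*)r2), f));
        *a2 = _mm_add_epi32(*a2,
                _mm_sad_epu8(_mm_loadu_si128((const __m128i*)r3), f));
    }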
> ;-----------------------------------------------------------------------------
> ; void pixel_sad_x3_16x16( uint8_t *fenc, uint8_t *pix0, uint8_t *pix1,
> ; uint8_t *pix2, intptr_t i_stride, int scores[3] )
>@@ -3350,6 +3524,64 @@
> SAD_X4_END_SSE2 1
> %endmacro
>
>+%macro SAD_X3_W64 0
>+cglobal pixel_sad_x3_64x16, 5, 7, 7
>+ pxor m0, m0
>+ pxor m1, m1
>+ pxor m2, m2
>+ mov r6, 16
>+
>+.loop:
>+ SAD_X3_64x4
>+ SAD_X3_64x4
>+
>+ sub r6, 8
>+ jnz .loop
>+ SAD_X3_END_SSE2 1
>+
>+cglobal pixel_sad_x3_64x32, 5, 7, 7
>+ pxor m0, m0
>+ pxor m1, m1
>+ pxor m2, m2
>+ mov r6, 32
>+
>+.loop:
>+ SAD_X3_64x4
>+ SAD_X3_64x4
>+
>+ sub r6, 8
>+ jnz .loop
>+ SAD_X3_END_SSE2 1
>+
>+cglobal pixel_sad_x3_64x48, 5, 7, 7
>+ pxor m0, m0
>+ pxor m1, m1
>+ pxor m2, m2
>+ mov r6, 48
>+
>+.loop:
>+ SAD_X3_64x4
>+ SAD_X3_64x4
>+
>+ sub r6, 8
>+ jnz .loop
>+ SAD_X3_END_SSE2 1
>+
>+cglobal pixel_sad_x3_64x64, 5, 7, 7
>+ pxor m0, m0
>+ pxor m1, m1
>+ pxor m2, m2
>+ mov r6, 64
>+
>+.loop:
>+ SAD_X3_64x4
>+ SAD_X3_64x4
>+
>+ sub r6, 8
>+ jnz .loop
>+ SAD_X3_END_SSE2 1
>+%endmacro
>+
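Note the loop shape shared by all four sizes: two SAD_X3_64x4 calls per
iteration advance eight rows, so with r6 preloaded to the block height the
"sub r6, 8" countdown runs 2, 4, 6 and 8 times for the 16-, 32-, 48- and
64-row variants before SAD_X3_END_SSE2 stores the three accumulators to the
scores array.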
> INIT_XMM sse2
> SAD_X_SSE2 3, 16, 16, 7
> SAD_X_SSE2 3, 16, 8, 7
>@@ -3385,6 +3617,7 @@
> SAD_X3_W32
> SAD_X3_W24
> SAD_X3_W48
>+SAD_X3_W64
> SAD_X_SSE2 3, 16, 64, 7
> SAD_X_SSE2 3, 16, 32, 7
> SAD_X_SSE2 3, 16, 16, 7
>@@ -3411,6 +3644,7 @@
> SAD_X3_W32
> SAD_X3_W24
> SAD_X3_W48
>+SAD_X3_W64
> SAD_X_SSE2 3, 16, 64, 7
> SAD_X_SSE2 3, 16, 32, 6
> SAD_X_SSE2 3, 16, 16, 6
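Since SAD_X3_W64 is expanded under both INIT_XMM ssse3 and INIT_XMM avx,
x86inc's cglobal mangles each label with the active cpu suffix, producing
exactly the x265_pixel_sad_x3_64xN_ssse3 and _avx symbols that the
asm-primitives.cpp hunks above bind into the function tables.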