[x265] [PATCH] asm: assembly code for pixel_sad_x4_64xN
Steve Borho
steve at borho.org
Thu Nov 7 19:03:35 CET 2013
On Thu, Nov 7, 2013 at 6:01 AM, <yuvaraj at multicorewareinc.com> wrote:
> # HG changeset patch
> # User Yuvaraj Venkatesh <yuvaraj at multicorewareinc.com>
> # Date 1383825592 -19800
> # Thu Nov 07 17:29:52 2013 +0530
> # Node ID d956fd7741f2314a12bbbf529796589d4d6388bf
> # Parent 519267d0fce9bf4b2f5ec52fe6ddc08a274f16bd
> asm: assembly code for pixel_sad_x4_64xN
>
Nice; now the last of the SAD intrinsic functions can be removed; pixel-sse41.cpp
has just been reduced in size by two-thirds.
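For anyone following along, here is a minimal C reference sketch of what these
x4 primitives compute, inferred from the pixel_sad_x3 comment further down in
sad-a.asm: one encoder block (at a fixed FENC_STRIDE) is compared against four
candidate blocks that share a stride, and one SAD is accumulated per candidate.
The function name, template parameters, and the FENC_STRIDE value below are
illustrative assumptions, not the exact x265 prototypes.

// Reference sketch (not x265 source): what a sad_x4 primitive computes.
// One encoder block at a fixed FENC_STRIDE is compared against four
// candidate blocks sharing one stride; a SAD is accumulated per candidate.
#include <cstdint>
#include <cstdlib>

static const intptr_t FENC_STRIDE = 64; // assumed; matches the asm's fixed fenc offsets

template<int W, int H>
void sad_x4_c(const uint8_t* fenc,
              const uint8_t* pix0, const uint8_t* pix1,
              const uint8_t* pix2, const uint8_t* pix3,
              intptr_t stride, int scores[4])
{
    const uint8_t* pix[4] = { pix0, pix1, pix2, pix3 };
    for (int i = 0; i < 4; i++)
    {
        const uint8_t* f = fenc;
        const uint8_t* p = pix[i];
        int sum = 0;
        for (int y = 0; y < H; y++)
        {
            for (int x = 0; x < W; x++)
                sum += abs(f[x] - p[x]);
            f += FENC_STRIDE;
            p += stride;
        }
        scores[i] = sum;
    }
}

// e.g. the 64x16 case: sad_x4_c<64, 16>(fenc, p0, p1, p2, p3, stride, scores);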
>
> diff -r 519267d0fce9 -r d956fd7741f2 source/common/x86/asm-primitives.cpp
> --- a/source/common/x86/asm-primitives.cpp Thu Nov 07 16:40:20 2013 +0530
> +++ b/source/common/x86/asm-primitives.cpp Thu Nov 07 17:29:52 2013 +0530
> @@ -420,6 +420,10 @@
> p.sad_x3[LUMA_64x32] = x265_pixel_sad_x3_64x32_ssse3;
> p.sad_x3[LUMA_64x48] = x265_pixel_sad_x3_64x48_ssse3;
> p.sad_x3[LUMA_64x64] = x265_pixel_sad_x3_64x64_ssse3;
> + p.sad_x4[LUMA_64x16] = x265_pixel_sad_x4_64x16_ssse3;
> + p.sad_x4[LUMA_64x32] = x265_pixel_sad_x4_64x32_ssse3;
> + p.sad_x4[LUMA_64x48] = x265_pixel_sad_x4_64x48_ssse3;
> + p.sad_x4[LUMA_64x64] = x265_pixel_sad_x4_64x64_ssse3;
>
> p.luma_hvpp[LUMA_8x8] = x265_interp_8tap_hv_pp_8x8_ssse3;
> p.ipfilter_sp[FILTER_V_S_P_8] = x265_interp_8tap_v_sp_ssse3;
> @@ -480,6 +484,10 @@
> p.sad_x3[LUMA_64x32] = x265_pixel_sad_x3_64x32_avx;
> p.sad_x3[LUMA_64x48] = x265_pixel_sad_x3_64x48_avx;
> p.sad_x3[LUMA_64x64] = x265_pixel_sad_x3_64x64_avx;
> + p.sad_x4[LUMA_64x16] = x265_pixel_sad_x4_64x16_avx;
> + p.sad_x4[LUMA_64x32] = x265_pixel_sad_x4_64x32_avx;
> + p.sad_x4[LUMA_64x48] = x265_pixel_sad_x4_64x48_avx;
> + p.sad_x4[LUMA_64x64] = x265_pixel_sad_x4_64x64_avx;
> }
> if (cpuMask & X265_CPU_XOP)
> {
> diff -r 519267d0fce9 -r d956fd7741f2 source/common/x86/sad-a.asm
> --- a/source/common/x86/sad-a.asm Thu Nov 07 16:40:20 2013 +0530
> +++ b/source/common/x86/sad-a.asm Thu Nov 07 17:29:52 2013 +0530
> @@ -2710,6 +2710,230 @@
> lea r3, [r3 + r4 * 2]
> %endmacro
>
> +%macro SAD_X4_64x4 0
> + mova m4, [r0]
> + mova m5, [r0 + 16]
> + movu m6, [r1]
> + psadbw m6, m4
> + paddd m0, m6
> + movu m6, [r1 + 16]
> + psadbw m6, m5
> + paddd m0, m6
> + movu m6, [r2]
> + psadbw m6, m4
> + paddd m1, m6
> + movu m6, [r2 + 16]
> + psadbw m6, m5
> + paddd m1, m6
> + movu m6, [r3]
> + psadbw m6, m4
> + paddd m2, m6
> + movu m6, [r3 + 16]
> + psadbw m6, m5
> + paddd m2, m6
> + movu m6, [r4]
> + psadbw m6, m4
> + paddd m3, m6
> + movu m6, [r4 + 16]
> + psadbw m6, m5
> + paddd m3, m6
> + mova m4, [r0 + 32]
> + mova m5, [r0 + 48]
> + movu m6, [r1 + 32]
> + psadbw m6, m4
> + paddd m0, m6
> + movu m6, [r1 + 48]
> + psadbw m6, m5
> + paddd m0, m6
> + movu m6, [r2 + 32]
> + psadbw m6, m4
> + paddd m1, m6
> + movu m6, [r2 + 48]
> + psadbw m6, m5
> + paddd m1, m6
> + movu m6, [r3 + 32]
> + psadbw m6, m4
> + paddd m2, m6
> + movu m6, [r3 + 48]
> + psadbw m6, m5
> + paddd m2, m6
> + movu m6, [r4 + 32]
> + psadbw m6, m4
> + paddd m3, m6
> + movu m6, [r4 + 48]
> + psadbw m6, m5
> + paddd m3, m6
> +
> + mova m4, [r0 + FENC_STRIDE]
> + mova m5, [r0 + 16 + FENC_STRIDE]
> + movu m6, [r1 + r5]
> + psadbw m6, m4
> + paddd m0, m6
> + movu m6, [r1 + 16 + r5]
> + psadbw m6, m5
> + paddd m0, m6
> + movu m6, [r2 + r5]
> + psadbw m6, m4
> + paddd m1, m6
> + movu m6, [r2 + 16 + r5]
> + psadbw m6, m5
> + paddd m1, m6
> + movu m6, [r3 + r5]
> + psadbw m6, m4
> + paddd m2, m6
> + movu m6, [r3 + 16 + r5]
> + psadbw m6, m5
> + paddd m2, m6
> + movu m6, [r4 + r5]
> + psadbw m6, m4
> + paddd m3, m6
> + movu m6, [r4 + 16 + r5]
> + psadbw m6, m5
> + paddd m3, m6
> + mova m4, [r0 + 32 + FENC_STRIDE]
> + mova m5, [r0 + 48 + FENC_STRIDE]
> + movu m6, [r1 + 32 + r5]
> + psadbw m6, m4
> + paddd m0, m6
> + movu m6, [r1 + 48 + r5]
> + psadbw m6, m5
> + paddd m0, m6
> + movu m6, [r2 + 32 + r5]
> + psadbw m6, m4
> + paddd m1, m6
> + movu m6, [r2 + 48 + r5]
> + psadbw m6, m5
> + paddd m1, m6
> + movu m6, [r3 + 32 + r5]
> + psadbw m6, m4
> + paddd m2, m6
> + movu m6, [r3 + 48 + r5]
> + psadbw m6, m5
> + paddd m2, m6
> + movu m6, [r4 + 32 + r5]
> + psadbw m6, m4
> + paddd m3, m6
> + movu m6, [r4 + 48 + r5]
> + psadbw m6, m5
> + paddd m3, m6
> +
> + mova m4, [r0 + FENC_STRIDE * 2]
> + mova m5, [r0 + 16 + FENC_STRIDE * 2]
> + movu m6, [r1 + r5 * 2]
> + psadbw m6, m4
> + paddd m0, m6
> + movu m6, [r1 + 16 + r5 * 2]
> + psadbw m6, m5
> + paddd m0, m6
> + movu m6, [r2 + r5 * 2]
> + psadbw m6, m4
> + paddd m1, m6
> + movu m6, [r2 + 16 + r5 * 2]
> + psadbw m6, m5
> + paddd m1, m6
> + movu m6, [r3 + r5 * 2]
> + psadbw m6, m4
> + paddd m2, m6
> + movu m6, [r3 + 16 + r5 * 2]
> + psadbw m6, m5
> + paddd m2, m6
> + movu m6, [r4 + r5 * 2]
> + psadbw m6, m4
> + paddd m3, m6
> + movu m6, [r4 + 16 + r5 * 2]
> + psadbw m6, m5
> + paddd m3, m6
> + mova m4, [r0 + 32 + FENC_STRIDE * 2]
> + mova m5, [r0 + 48 + FENC_STRIDE * 2]
> + movu m6, [r1 + 32 + r5 * 2]
> + psadbw m6, m4
> + paddd m0, m6
> + movu m6, [r1 + 48 + r5 * 2]
> + psadbw m6, m5
> + paddd m0, m6
> + movu m6, [r2 + 32 + r5 * 2]
> + psadbw m6, m4
> + paddd m1, m6
> + movu m6, [r2 + 48 + r5 * 2]
> + psadbw m6, m5
> + paddd m1, m6
> + movu m6, [r3 + 32 + r5 * 2]
> + psadbw m6, m4
> + paddd m2, m6
> + movu m6, [r3 + 48 + r5 * 2]
> + psadbw m6, m5
> + paddd m2, m6
> + movu m6, [r4 + 32 + r5 * 2]
> + psadbw m6, m4
> + paddd m3, m6
> + movu m6, [r4 + 48 + r5 * 2]
> + psadbw m6, m5
> + paddd m3, m6
> +
> + lea r0, [r0 + FENC_STRIDE * 2]
> + lea r1, [r1 + r5 * 2]
> + lea r2, [r2 + r5 * 2]
> + lea r3, [r3 + r5 * 2]
> + lea r4, [r4 + r5 * 2]
> + mova m4, [r0 + FENC_STRIDE]
> + mova m5, [r0 + 16 + FENC_STRIDE]
> + movu m6, [r1 + r5]
> + psadbw m6, m4
> + paddd m0, m6
> + movu m6, [r1 + 16 + r5]
> + psadbw m6, m5
> + paddd m0, m6
> + movu m6, [r2 + r5]
> + psadbw m6, m4
> + paddd m1, m6
> + movu m6, [r2 + 16 + r5]
> + psadbw m6, m5
> + paddd m1, m6
> + movu m6, [r3 + r5]
> + psadbw m6, m4
> + paddd m2, m6
> + movu m6, [r3 + 16 + r5]
> + psadbw m6, m5
> + paddd m2, m6
> + movu m6, [r4 + r5]
> + psadbw m6, m4
> + paddd m3, m6
> + movu m6, [r4 + 16 + r5]
> + psadbw m6, m5
> + paddd m3, m6
> + mova m4, [r0 + 32 + FENC_STRIDE]
> + mova m5, [r0 + 48 + FENC_STRIDE]
> + movu m6, [r1 + 32 + r5]
> + psadbw m6, m4
> + paddd m0, m6
> + movu m6, [r1 + 48 + r5]
> + psadbw m6, m5
> + paddd m0, m6
> + movu m6, [r2 + 32 + r5]
> + psadbw m6, m4
> + paddd m1, m6
> + movu m6, [r2 + 48 + r5]
> + psadbw m6, m5
> + paddd m1, m6
> + movu m6, [r3 + 32 + r5]
> + psadbw m6, m4
> + paddd m2, m6
> + movu m6, [r3 + 48 + r5]
> + psadbw m6, m5
> + paddd m2, m6
> + movu m6, [r4 + 32 + r5]
> + psadbw m6, m4
> + paddd m3, m6
> + movu m6, [r4 + 48 + r5]
> + psadbw m6, m5
> + paddd m3, m6
> + lea r0, [r0 + FENC_STRIDE * 2]
> + lea r1, [r1 + r5 * 2]
> + lea r2, [r2 + r5 * 2]
> + lea r3, [r3 + r5 * 2]
> + lea r4, [r4 + r5 * 2]
> +%endmacro
> +
>
> ;-----------------------------------------------------------------------------
> ; void pixel_sad_x3_16x16( uint8_t *fenc, uint8_t *pix0, uint8_t *pix1,
> ;                          uint8_t *pix2, intptr_t i_stride, int scores[3] )
> @@ -3582,6 +3806,92 @@
> SAD_X3_END_SSE2 1
> %endmacro
>
> +%macro SAD_X4_W64 0
> +%if ARCH_X86_64 == 1
> +cglobal pixel_sad_x4_64x16, 6, 8, 8
> +%define count r7
> +%else
> +cglobal pixel_sad_x4_64x16, 6, 7, 8, 0-4
> +%define count dword [rsp]
> +%endif
> + pxor m0, m0
> + pxor m1, m1
> + pxor m2, m2
> + pxor m3, m3
> + mov count, 16
> +
> +.loop
> + SAD_X4_64x4
> + SAD_X4_64x4
> +
> + sub count, 8
> + jnz .loop
> + SAD_X4_END_SSE2 1
> +
> +%if ARCH_X86_64 == 1
> +cglobal pixel_sad_x4_64x32, 6, 8, 8
> +%define count r7
> +%else
> +cglobal pixel_sad_x4_64x32, 6, 7, 8, 0-4
> +%define count dword [rsp]
> +%endif
> + pxor m0, m0
> + pxor m1, m1
> + pxor m2, m2
> + pxor m3, m3
> + mov count, 32
> +
> +.loop
> + SAD_X4_64x4
> + SAD_X4_64x4
> +
> + sub count, 8
> + jnz .loop
> + SAD_X4_END_SSE2 1
> +
> +%if ARCH_X86_64 == 1
> +cglobal pixel_sad_x4_64x48, 6, 8, 8
> +%define count r7
> +%else
> +cglobal pixel_sad_x4_64x48, 6, 7, 8, 0-4
> +%define count dword [rsp]
> +%endif
> + pxor m0, m0
> + pxor m1, m1
> + pxor m2, m2
> + pxor m3, m3
> + mov count, 48
> +
> +.loop
> + SAD_X4_64x4
> + SAD_X4_64x4
> +
> + sub count, 8
> + jnz .loop
> + SAD_X4_END_SSE2 1
> +
> +%if ARCH_X86_64 == 1
> +cglobal pixel_sad_x4_64x64, 6, 8, 8
> +%define count r7
> +%else
> +cglobal pixel_sad_x4_64x64, 6, 7, 8, 0-4
> +%define count dword [rsp]
> +%endif
> + pxor m0, m0
> + pxor m1, m1
> + pxor m2, m2
> + pxor m3, m3
> + mov count, 64
> +
> +.loop
> + SAD_X4_64x4
> + SAD_X4_64x4
> +
> + sub count, 8
> + jnz .loop
> + SAD_X4_END_SSE2 1
> +%endmacro
> +
> INIT_XMM sse2
> SAD_X_SSE2 3, 16, 16, 7
> SAD_X_SSE2 3, 16, 8, 7
> @@ -3629,6 +3939,7 @@
> SAD_X4_W24
> SAD_X4_W32
> SAD_X4_W48
> +SAD_X4_W64
> SAD_X_SSE2 4, 16, 64, 7
> SAD_X_SSE2 4, 16, 32, 7
> SAD_X_SSE2 4, 16, 16, 7
> @@ -3655,6 +3966,7 @@
> SAD_X4_W24
> SAD_X4_W32
> SAD_X4_W48
> +SAD_X4_W64
> SAD_X_SSE2 4, 16, 64, 7
> SAD_X_SSE2 4, 16, 32, 7
> SAD_X_SSE2 4, 16, 16, 7
--
Steve Borho