[x265] [PATCH] asm: assembly code for pixel_sse_ss_48x64
Yuvaraj Venkatesh
yuvaraj at multicorewareinc.com
Tue Nov 26 15:05:37 CET 2013
Re-sending the patch after correcting the alignment problem.
On Tue, Nov 26, 2013 at 4:59 PM, <yuvaraj at multicorewareinc.com> wrote:
> # HG changeset patch
> # User Yuvaraj Venkatesh <yuvaraj at multicorewareinc.com>
> # Date 1385465357 -19800
> # Tue Nov 26 16:59:17 2013 +0530
> # Node ID 9aca23400805335f6cd3134d8dc045a00432c4d1
> # Parent 52738c22dce02e8d59cc4b09f1e1b23a0a8360c5
> asm: assembly code for pixel_sse_ss_48x64
>
> diff -r 52738c22dce0 -r 9aca23400805 source/common/x86/asm-primitives.cpp
> --- a/source/common/x86/asm-primitives.cpp Tue Nov 26 16:15:02 2013 +0530
> +++ b/source/common/x86/asm-primitives.cpp Tue Nov 26 16:59:17 2013 +0530
> @@ -108,7 +108,8 @@
> p.sse_ss[LUMA_32x16] = x265_pixel_ssd_ss_32x16_ ## cpu; \
> p.sse_ss[LUMA_32x24] = x265_pixel_ssd_ss_32x24_ ## cpu; \
> p.sse_ss[LUMA_32x32] = x265_pixel_ssd_ss_32x32_ ## cpu; \
> - p.sse_ss[LUMA_32x64] = x265_pixel_ssd_ss_32x64_ ## cpu;
> + p.sse_ss[LUMA_32x64] = x265_pixel_ssd_ss_32x64_ ## cpu; \
> + p.sse_ss[LUMA_48x64] = x265_pixel_ssd_ss_48x64_ ## cpu;
>
> #define SA8D_INTER_FROM_BLOCK(cpu) \
> p.sa8d_inter[LUMA_4x8] = x265_pixel_satd_4x8_ ## cpu; \
> diff -r 52738c22dce0 -r 9aca23400805 source/common/x86/pixel-a.asm
> --- a/source/common/x86/pixel-a.asm Tue Nov 26 16:15:02 2013 +0530
> +++ b/source/common/x86/pixel-a.asm Tue Nov 26 16:59:17 2013 +0530
> @@ -511,21 +511,90 @@
> RET
> %endmacro
>
> +%macro SSD_SS_48 0
> +cglobal pixel_ssd_ss_48x64, 4,7,6
> + FIX_STRIDES r1, r3
> + mov r4d, 32
> + pxor m0, m0
> +.loop:
> + mova m1, [r0]
> + psubw m1, [r2]
> + pmaddwd m1, m1
> + paddd m0, m1
> + mova m1, [r0 + 16]
> + psubw m1, [r2 + 16]
> + pmaddwd m1, m1
> + paddd m0, m1
> + mova m1, [r0 + 32]
> + psubw m1, [r2 + 32]
> + pmaddwd m1, m1
> + paddd m0, m1
> + mova m1, [r0 + 48]
> + psubw m1, [r2 + 48]
> + pmaddwd m1, m1
> + paddd m0, m1
> + mova m1, [r0 + 64]
> + psubw m1, [r2 + 64]
> + pmaddwd m1, m1
> + paddd m0, m1
> + mova m1, [r0 + 80]
> + psubw m1, [r2 + 80]
> + pmaddwd m1, m1
> + paddd m0, m1
> + lea r0, [r0 + 2*r1]
> + lea r2, [r2 + 2*r3]
> + mova m1, [r0]
> + psubw m1, [r2]
> + pmaddwd m1, m1
> + paddd m0, m1
> + mova m1, [r0 + 16]
> + psubw m1, [r2 + 16]
> + pmaddwd m1, m1
> + paddd m0, m1
> + mova m1, [r0 + 32]
> + psubw m1, [r2 + 32]
> + pmaddwd m1, m1
> + paddd m0, m1
> + mova m1, [r0 + 48]
> + psubw m1, [r2 + 48]
> + pmaddwd m1, m1
> + paddd m0, m1
> + mova m1, [r0 + 64]
> + psubw m1, [r2 + 64]
> + pmaddwd m1, m1
> + paddd m0, m1
> + mova m1, [r0 + 80]
> + psubw m1, [r2 + 80]
> + pmaddwd m1, m1
> + paddd m0, m1
> + lea r0, [r0 + 2*r1]
> + lea r2, [r2 + 2*r3]
> + dec r4d
> + jnz .loop
> +    ; HADDD reduces the four dword partial sums to one total and is SSE2-safe
> +    HADDD m0, m1
> + movd eax, m0
> + RET
> +%endmacro
> +
> INIT_XMM sse2
> SSD_SS_ONE
> SSD_SS_12x16
> SSD_SS_24
> SSD_SS_32xN
> +SSD_SS_48
> INIT_XMM sse4
> SSD_SS_ONE
> SSD_SS_12x16
> SSD_SS_24
> SSD_SS_32xN
> +SSD_SS_48
> INIT_XMM avx
> SSD_SS_ONE
> SSD_SS_12x16
> SSD_SS_24
> SSD_SS_32xN
> +SSD_SS_48
> %endif ; !HIGH_BIT_DEPTH
>
> %if HIGH_BIT_DEPTH == 0
>
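For anyone reading the patch without the rest of pixel-a.asm at hand: the new macro computes the sum of squared differences between two 48x64 blocks of int16_t samples. Each loop iteration handles two rows (six 16-byte loads per row, psubw for the difference, pmaddwd to square and pair-sum, paddd to accumulate into dword lanes), so the count of 32 in r4d covers all 64 rows. Below is a minimal scalar sketch of the same computation; the function name is illustrative only, and the strides are assumed to be in units of int16_t, matching the "lea r0, [r0 + 2*r1]" row advance in the asm.

    #include <stdint.h>

    /* Illustrative scalar equivalent of pixel_ssd_ss_48x64 (not the x265
     * reference implementation): sum of squared differences between two
     * 48x64 blocks of int16_t, with strides given in int16_t units. */
    static int ssd_ss_48x64_ref(const int16_t *a, intptr_t strideA,
                                const int16_t *b, intptr_t strideB)
    {
        int sum = 0;
        for (int y = 0; y < 64; y++)
        {
            for (int x = 0; x < 48; x++)
            {
                int d = a[x] - b[x];
                sum += d * d;
            }
            a += strideA;
            b += strideB;
        }
        return sum;
    }

The asm arrives at the same total by keeping four dword partial sums in m0 and collapsing them with a horizontal add before the final movd eax, m0.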