[x265] [PATCH] asm: assembly code for pixel_sse_ss_24x32
Yuvaraj Venkatesh
yuvaraj at multicorewareinc.com
Tue Nov 26 14:59:28 CET 2013
Re-sending the patch with the alignment issue corrected.
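For anyone reading along: the primitive computes the sum of squared differences between two blocks of int16_t residual samples. A minimal C++ sketch of what the 24x32 case returns is below; the name is illustrative, and strides are assumed to be in int16_t elements, matching the 2*r1 / 2*r3 scaling in the quoted asm.

#include <cstdint>

// Illustrative reference for pixel_ssd_ss_24x32: sum of squared
// differences over a 24x32 block of int16_t samples.  Strides are in
// elements, not bytes (assumption based on the 2*r1 scaling in the asm).
static int ssd_ss_24x32_ref(const int16_t *fenc, intptr_t strideFenc,
                            const int16_t *fref, intptr_t strideFref)
{
    int sum = 0;
    for (int y = 0; y < 32; y++)
    {
        for (int x = 0; x < 24; x++)
        {
            int d = fenc[x] - fref[x];
            sum += d * d;
        }
        fenc += strideFenc;
        fref += strideFref;
    }
    return sum;
}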
On Tue, Nov 26, 2013 at 6:43 PM, chen <chenm003 at 163.com> wrote:
> At 2013-11-26 18:45:25, yuvaraj at multicorewareinc.com wrote:
>
> ># HG changeset patch
> ># User Yuvaraj Venkatesh <yuvaraj at multicorewareinc.com>
> ># Date 1385462702 -19800
> ># Tue Nov 26 16:15:02 2013 +0530
> ># Node ID 52738c22dce02e8d59cc4b09f1e1b23a0a8360c5
> ># Parent 116d91f08fcb123d4b088df5c1400e599306b6f8
> >asm: assembly code for pixel_sse_ss_24x32
> >
> >diff -r 116d91f08fcb -r 52738c22dce0 source/common/x86/asm-primitives.cpp
> >--- a/source/common/x86/asm-primitives.cpp Tue Nov 26 14:19:27 2013 +0800
> >+++ b/source/common/x86/asm-primitives.cpp Tue Nov 26 16:15:02 2013 +0530
> >@@ -103,6 +103,7 @@
> > p.sse_ss[LUMA_16x16] = x265_pixel_ssd_ss_16x16_ ## cpu; \
> > p.sse_ss[LUMA_16x32] = x265_pixel_ssd_ss_16x32_ ## cpu; \
> > p.sse_ss[LUMA_16x64] = x265_pixel_ssd_ss_16x64_ ## cpu; \
> >+ p.sse_ss[LUMA_24x32] = x265_pixel_ssd_ss_24x32_ ## cpu; \
> > p.sse_ss[LUMA_32x8] = x265_pixel_ssd_ss_32x8_ ## cpu; \
> > p.sse_ss[LUMA_32x16] = x265_pixel_ssd_ss_32x16_ ## cpu; \
> > p.sse_ss[LUMA_32x24] = x265_pixel_ssd_ss_32x24_ ## cpu; \
> >diff -r 116d91f08fcb -r 52738c22dce0 source/common/x86/pixel-a.asm
> >--- a/source/common/x86/pixel-a.asm Tue Nov 26 14:19:27 2013 +0800
> >+++ b/source/common/x86/pixel-a.asm Tue Nov 26 16:15:02 2013 +0530
> >@@ -469,17 +469,62 @@
> > SSD_SS_32 64
> > %endmacro
> >
> >+%macro SSD_SS_24 0
> >+cglobal pixel_ssd_ss_24x32, 4,7,6
> >+ FIX_STRIDES r1, r3
> >+ mov r4d, 16
> >+ pxor m0, m0
> >+.loop
> >+ mova m1, [r0]
> >+ psubw m1, [r2]
> This is right, but it is unsafe; I am not sure the input pointers are aligned.
>
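To make the concern concrete: mova (and psubw with a memory operand) requires a 16-byte-aligned address and faults otherwise, while movu accepts any address. In intrinsics terms the difference is only the load used; a small hedged sketch:

#include <emmintrin.h>   // SSE2
#include <cstdint>

// Aligned load, the intrinsic form of mova: p must be 16-byte aligned.
static inline __m128i load_row_aligned(const int16_t *p)
{
    return _mm_load_si128((const __m128i *)p);
}

// Unaligned load, the intrinsic form of movu: safe for any address, so
// no assumption is needed about how the int16_t rows are allocated.
static inline __m128i load_row_unaligned(const int16_t *p)
{
    return _mm_loadu_si128((const __m128i *)p);
}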
> >+ pmaddwd m1, m1
> >+ paddd m0, m1
> >+ mova m1, [r0 + 16]
> >+ psubw m1, [r2 + 16]
> >+ pmaddwd m1, m1
> >+ paddd m0, m1
> >+ mova m1, [r0 + 32]
> >+ psubw m1, [r2 + 32]
> >+ pmaddwd m1, m1
> >+ paddd m0, m1
> >+ lea r0, [r0 + 2*r1]
> >+ lea r2, [r2 + 2*r3]
> >+ mova m1, [r0]
> >+ psubw m1, [r2]
> >+ pmaddwd m1, m1
> >+ paddd m0, m1
> >+ mova m1, [r0 + 16]
> >+ psubw m1, [r2 + 16]
> >+ pmaddwd m1, m1
> >+ paddd m0, m1
> >+ mova m1, [r0 + 32]
> >+ psubw m1, [r2 + 32]
> >+ pmaddwd m1, m1
> >+ paddd m0, m1
> >+ lea r0, [r0 + 2*r1]
> >+ lea r2, [r2 + 2*r3]
> >+ dec r4d
> >+ jnz .loop
> >+ phaddd m0, m0
> >+ phaddd m0, m0
> >+ movd eax, m0
> >+ RET
> >+%endmacro
> >+
> > INIT_XMM sse2
> > SSD_SS_ONE
> > SSD_SS_12x16
> >+SSD_SS_24
> > SSD_SS_32xN
> > INIT_XMM sse4
> > SSD_SS_ONE
> > SSD_SS_12x16
> >+SSD_SS_24
> > SSD_SS_32xN
> > INIT_XMM avx
> > SSD_SS_ONE
> > SSD_SS_12x16
> >+SSD_SS_24
> > SSD_SS_32xN
> > %endif ; !HIGH_BIT_DEPTH
> >
> >@@ -7696,9 +7741,6 @@
> > %endif ; !ARCH_X86_64
> > %endmacro ; SA8D
> >
> >-;=============================================================================
> >-; INTRA SATD
> >-;=============================================================================
> > %define TRANS TRANS_SSE2
> > %define DIFFOP DIFF_UNPACK_SSE2
> > %define LOAD_SUMSUB_8x4P LOAD_DIFF_8x4P
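To summarize the structure of the quoted macro: each loop iteration handles two rows of 24 int16_t samples (three 16-byte vectors per row), the counter in r4d runs the loop 16 times to cover all 32 rows, and the two phaddd instructions plus movd fold the four dword partial sums in xmm0 into the scalar return value. A rough SSE2 intrinsics mirror of that structure is sketched below; it is illustrative only and uses unaligned loads, so it does not assume aligned inputs.

#include <emmintrin.h>   // SSE2 intrinsics
#include <cstdint>

// Illustrative mirror of pixel_ssd_ss_24x32: 16 iterations, two rows per
// iteration, three 8-sample vectors per row, then a horizontal reduction
// of the four dword partial sums.  Strides are in int16_t elements.
static int ssd_ss_24x32_sketch(const int16_t *a, intptr_t strideA,
                               const int16_t *b, intptr_t strideB)
{
    __m128i acc = _mm_setzero_si128();                     // pxor m0, m0
    for (int i = 0; i < 16; i++)                           // mov r4d, 16
    {
        for (int row = 0; row < 2; row++)
        {
            for (int x = 0; x < 24; x += 8)                // byte offsets 0, 16, 32
            {
                __m128i va = _mm_loadu_si128((const __m128i *)(a + x));
                __m128i vb = _mm_loadu_si128((const __m128i *)(b + x));
                __m128i d  = _mm_sub_epi16(va, vb);        // psubw
                d   = _mm_madd_epi16(d, d);                // pmaddwd
                acc = _mm_add_epi32(acc, d);               // paddd
            }
            a += strideA;                                  // lea r0, [r0 + 2*r1]
            b += strideB;                                  // lea r2, [r2 + 2*r3]
        }
    }
    // phaddd x2 + movd eax: sum the four 32-bit lanes into the result.
    int32_t lanes[4];
    _mm_storeu_si128((__m128i *)lanes, acc);
    return lanes[0] + lanes[1] + lanes[2] + lanes[3];
}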