[x265] [PATCH] asm: assembly code for pixel_sse_ss_64xN

Yuvaraj Venkatesh yuvaraj at multicorewareinc.com
Tue Nov 26 15:09:06 CET 2013


Re-sending the patch after correcting the alignment problem.


On Tue, Nov 26, 2013 at 5:43 PM, <yuvaraj at multicorewareinc.com> wrote:

> # HG changeset patch
> # User Yuvaraj Venkatesh <yuvaraj at multicorewareinc.com>
> # Date 1385467967 -19800
> #      Tue Nov 26 17:42:47 2013 +0530
> # Node ID ffd3d3d25f95e5d2f18d05265b2c8e95ab21864c
> # Parent  9aca23400805335f6cd3134d8dc045a00432c4d1
> asm: assembly code for pixel_sse_ss_64xN
>
> diff -r 9aca23400805 -r ffd3d3d25f95 source/common/x86/asm-primitives.cpp
> --- a/source/common/x86/asm-primitives.cpp      Tue Nov 26 16:59:17 2013
> +0530
> +++ b/source/common/x86/asm-primitives.cpp      Tue Nov 26 17:42:47 2013
> +0530
> @@ -109,7 +109,11 @@
>      p.sse_ss[LUMA_32x24]   = x265_pixel_ssd_ss_32x24_ ## cpu; \
>      p.sse_ss[LUMA_32x32]   = x265_pixel_ssd_ss_32x32_ ## cpu; \
>      p.sse_ss[LUMA_32x64]   = x265_pixel_ssd_ss_32x64_ ## cpu; \
> -    p.sse_ss[LUMA_48x64]   = x265_pixel_ssd_ss_48x64_ ## cpu;
> +    p.sse_ss[LUMA_48x64]   = x265_pixel_ssd_ss_48x64_ ## cpu; \
> +    p.sse_ss[LUMA_64x16]   = x265_pixel_ssd_ss_64x16_ ## cpu; \
> +    p.sse_ss[LUMA_64x32]   = x265_pixel_ssd_ss_64x32_ ## cpu; \
> +    p.sse_ss[LUMA_64x48]   = x265_pixel_ssd_ss_64x48_ ## cpu; \
> +    p.sse_ss[LUMA_64x64]   = x265_pixel_ssd_ss_64x64_ ## cpu;
>
>  #define SA8D_INTER_FROM_BLOCK(cpu) \
>      p.sa8d_inter[LUMA_4x8]  = x265_pixel_satd_4x8_ ## cpu; \
> diff -r 9aca23400805 -r ffd3d3d25f95 source/common/x86/pixel-a.asm
> --- a/source/common/x86/pixel-a.asm     Tue Nov 26 16:59:17 2013 +0530
> +++ b/source/common/x86/pixel-a.asm     Tue Nov 26 17:42:47 2013 +0530
> @@ -577,24 +577,116 @@
>      RET
>  %endmacro
>
> +%macro SSD_SS_64 1
> +cglobal pixel_ssd_ss_64x%1, 4,7,6
> +    FIX_STRIDES r1, r3
> +    mov    r4d, %1/2
> +    pxor    m0, m0
> +.loop:
> +    mova    m1, [r0]
> +    psubw   m1, [r2]
> +    pmaddwd m1, m1
> +    paddd   m0, m1
> +    mova    m1, [r0 + 16]
> +    psubw   m1, [r2 + 16]
> +    pmaddwd m1, m1
> +    paddd   m0, m1
> +    mova    m1, [r0 + 32]
> +    psubw   m1, [r2 + 32]
> +    pmaddwd m1, m1
> +    paddd   m0, m1
> +    mova    m1, [r0 + 48]
> +    psubw   m1, [r2 + 48]
> +    pmaddwd m1, m1
> +    paddd   m0, m1
> +    mova    m1, [r0 + 64]
> +    psubw   m1, [r2 + 64]
> +    pmaddwd m1, m1
> +    paddd   m0, m1
> +    mova    m1, [r0 + 80]
> +    psubw   m1, [r2 + 80]
> +    pmaddwd m1, m1
> +    paddd   m0, m1
> +    mova    m1, [r0 + 96]
> +    psubw   m1, [r2 + 96]
> +    pmaddwd m1, m1
> +    paddd   m0, m1
> +    mova    m1, [r0 + 112]
> +    psubw   m1, [r2 + 112]
> +    pmaddwd m1, m1
> +    paddd   m0, m1
> +    lea     r0, [r0 + 2*r1]
> +    lea     r2, [r2 + 2*r3]
> +    mova    m1, [r0]
> +    psubw   m1, [r2]
> +    pmaddwd m1, m1
> +    paddd   m0, m1
> +    mova    m1, [r0 + 16]
> +    psubw   m1, [r2 + 16]
> +    pmaddwd m1, m1
> +    paddd   m0, m1
> +    mova    m1, [r0 + 32]
> +    psubw   m1, [r2 + 32]
> +    pmaddwd m1, m1
> +    paddd   m0, m1
> +    mova    m1, [r0 + 48]
> +    psubw   m1, [r2 + 48]
> +    pmaddwd m1, m1
> +    paddd   m0, m1
> +    mova    m1, [r0 + 64]
> +    psubw   m1, [r2 + 64]
> +    pmaddwd m1, m1
> +    paddd   m0, m1
> +    mova    m1, [r0 + 80]
> +    psubw   m1, [r2 + 80]
> +    pmaddwd m1, m1
> +    paddd   m0, m1
> +    mova    m1, [r0 + 96]
> +    psubw   m1, [r2 + 96]
> +    pmaddwd m1, m1
> +    paddd   m0, m1
> +    mova    m1, [r0 + 112]
> +    psubw   m1, [r2 + 112]
> +    pmaddwd m1, m1
> +    paddd   m0, m1
> +    lea     r0, [r0 + 2*r1]
> +    lea     r2, [r2 + 2*r3]
> +    dec     r4d
> +    jnz .loop
> +    phaddd    m0, m0
> +    phaddd    m0, m0
> +    movd     eax, m0
> +    RET
> +%endmacro
> +
> +%macro SSD_SS_64xN 0
> +SSD_SS_64 16
> +SSD_SS_64 32
> +SSD_SS_64 48
> +SSD_SS_64 64
> +%endmacro
> +
>  INIT_XMM sse2
>  SSD_SS_ONE
>  SSD_SS_12x16
>  SSD_SS_24
>  SSD_SS_32xN
>  SSD_SS_48
> +SSD_SS_64xN
>  INIT_XMM sse4
>  SSD_SS_ONE
>  SSD_SS_12x16
>  SSD_SS_24
>  SSD_SS_32xN
>  SSD_SS_48
> +SSD_SS_64xN
>  INIT_XMM avx
>  SSD_SS_ONE
>  SSD_SS_12x16
>  SSD_SS_24
>  SSD_SS_32xN
>  SSD_SS_48
> +SSD_SS_64xN
>  %endif ; !HIGH_BIT_DEPTH
>
>  %if HIGH_BIT_DEPTH == 0
>
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://mailman.videolan.org/pipermail/x265-devel/attachments/20131126/cc6e703e/attachment.html>


More information about the x265-devel mailing list