[x265] [PATCH] asm: psyCost_pp_32x32 in sse4: improve 110849c->13373c

Steve Borho steve at borho.org
Mon Jan 5 15:22:17 CET 2015


On 01/05, Divya Manivannan wrote:
> # HG changeset patch
> # User Divya Manivannan <divya at multicorewareinc.com>
> # Date 1420434254 -19800
> #      Mon Jan 05 10:34:14 2015 +0530
> # Node ID e93ef9322ba1214243187411bc0232e9579f72d4
> # Parent  f255e8d06423231cb8c58ab5d3b10de7fb27b424
> asm: psyCost_pp_32x32 in sse4: improve 110849c->13373c

queued

> diff -r f255e8d06423 -r e93ef9322ba1 source/common/x86/asm-primitives.cpp
> --- a/source/common/x86/asm-primitives.cpp	Fri Jan 02 18:22:38 2015 +0530
> +++ b/source/common/x86/asm-primitives.cpp	Mon Jan 05 10:34:14 2015 +0530
> @@ -1439,6 +1439,7 @@
>  #if X86_64
>          p.psy_cost_pp[BLOCK_8x8] = x265_psyCost_pp_8x8_sse4;
>          p.psy_cost_pp[BLOCK_16x16] = x265_psyCost_pp_16x16_sse4;
> +        p.psy_cost_pp[BLOCK_32x32] = x265_psyCost_pp_32x32_sse4;
>  #endif
>      }
>      if (cpuMask & X265_CPU_XOP)
> @@ -1726,6 +1727,7 @@
>  #if X86_64
>          p.psy_cost_pp[BLOCK_8x8] = x265_psyCost_pp_8x8_sse4;
>          p.psy_cost_pp[BLOCK_16x16] = x265_psyCost_pp_16x16_sse4;
> +        p.psy_cost_pp[BLOCK_32x32] = x265_psyCost_pp_32x32_sse4;
>  #endif
>      }
>      if (cpuMask & X265_CPU_AVX)
> diff -r f255e8d06423 -r e93ef9322ba1 source/common/x86/pixel-a.asm
> --- a/source/common/x86/pixel-a.asm	Fri Jan 02 18:22:38 2015 +0530
> +++ b/source/common/x86/pixel-a.asm	Mon Jan 05 10:34:14 2015 +0530
> @@ -7131,3 +7131,216 @@
>      RET
>  %endif ; HIGH_BIT_DEPTH
>  %endif
> +
> +%if ARCH_X86_64
> +%if HIGH_BIT_DEPTH
> +INIT_XMM sse4
> +cglobal psyCost_pp_32x32, 4, 9, 14
> +
> +    FIX_STRIDES r1, r3
> +    lea             r4, [3 * r1]
> +    lea             r8, [3 * r3]
> +    mova            m12, [pw_1]
> +    mova            m13, [pd_1]
> +    pxor            m11, m11
> +    mov             r7d, 4
> +.loopH:
> +    mov             r6d, 4
> +.loopW:
> +    pxor            m10, m10
> +    movu            m0, [r0]
> +    movu            m1, [r0 + r1]
> +    movu            m2, [r0 + r1 * 2]
> +    movu            m3, [r0 + r4]
> +    lea             r5, [r0 + r1 * 4]
> +    movu            m4, [r5]
> +    movu            m5, [r5 + r1]
> +    movu            m6, [r5 + r1 * 2]
> +    movu            m7, [r5 + r4]
> +
> +    paddw           m8, m0, m1
> +    paddw           m8, m2
> +    paddw           m8, m3
> +    paddw           m8, m4
> +    paddw           m8, m5
> +    paddw           m8, m6
> +    paddw           m8, m7
> +    pmaddwd         m8, m12
> +    movhlps         m9, m8
> +    paddd           m8, m9
> +    psrldq          m9, m8, 4
> +    paddd           m8, m9
> +    psrld           m8, 2
> +
> +    HADAMARD8_2D 0, 1, 2, 3, 4, 5, 6, 7, 9, amax
> +
> +    paddd           m0, m1
> +    paddd           m0, m2
> +    paddd           m0, m3
> +    HADDUW m0, m1
> +    paddd           m0, m13
> +    psrld           m0, 1
> +    psubd           m10, m0, m8
> +
> +    movu            m0, [r2]
> +    movu            m1, [r2 + r3]
> +    movu            m2, [r2 + r3 * 2]
> +    movu            m3, [r2 + r8]
> +    lea             r5, [r2 + r3 * 4]
> +    movu            m4, [r5]
> +    movu            m5, [r5 + r3]
> +    movu            m6, [r5 + r3 * 2]
> +    movu            m7, [r5 + r8]
> +
> +    paddw           m8, m0, m1
> +    paddw           m8, m2
> +    paddw           m8, m3
> +    paddw           m8, m4
> +    paddw           m8, m5
> +    paddw           m8, m6
> +    paddw           m8, m7
> +    pmaddwd         m8, m12
> +    movhlps         m9, m8
> +    paddd           m8, m9
> +    psrldq          m9, m8, 4
> +    paddd           m8, m9
> +    psrld           m8, 2
> +
> +    HADAMARD8_2D 0, 1, 2, 3, 4, 5, 6, 7, 9, amax
> +
> +    paddd           m0, m1
> +    paddd           m0, m2
> +    paddd           m0, m3
> +    HADDUW m0, m1
> +    paddd           m0, m13
> +    psrld           m0, 1
> +    psubd           m0, m8
> +    psubd           m10, m0
> +    pabsd           m0, m10
> +    paddd           m11, m0
> +    add             r0, 16
> +    add             r2, 16
> +    dec             r6d
> +    jnz             .loopW
> +    lea             r0, [r0 + r1 * 8 - 64]
> +    lea             r2, [r2 + r3 * 8 - 64]
> +    dec             r7d
> +    jnz             .loopH
> +    movd            eax, m11
> +    RET
> +
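[For anyone following the asm: here is a minimal scalar C++ sketch, my annotation and not part of the patch, of what each .loopW iteration computes for one 8x8 block. The helper names hadamard8(), psyEnergy8x8(), and psyCost_pp_32x32_ref() are hypothetical; the asm keeps the whole block in xmm registers, and its amax variant of HADAMARD8_2D produces half of the plain Hadamard abs-sum, which the trailing paddd/psrld normalization folds back into the usual sa8d result.]

#include <cstdint>
#include <cstdlib>

// In-place 8-point Hadamard butterfly (unnormalized).
static void hadamard8(int v[8])
{
    for (int step = 1; step < 8; step <<= 1)
        for (int i = 0; i < 8; i += step << 1)
            for (int j = i; j < i + step; j++)
            {
                int a = v[j], b = v[j + step];
                v[j]        = a + b;
                v[j + step] = a - b;
            }
}

// Per-block psy energy: sa8d of the block against a zero block, minus the
// DC term (pixel sum >> 2). This mirrors the HADAMARD8_2D/HADDUW result
// minus the pmaddwd-pw_1 pixel-sum reduction in the asm above.
static int psyEnergy8x8(const uint16_t* pix, intptr_t stride)
{
    int m[8][8], dc = 0;
    for (int y = 0; y < 8; y++)
        for (int x = 0; x < 8; x++)
        {
            m[y][x] = pix[y * stride + x];
            dc += m[y][x];
        }
    for (int y = 0; y < 8; y++)          // 1D transform on rows
        hadamard8(m[y]);
    for (int x = 0; x < 8; x++)          // then on columns
    {
        int col[8];
        for (int y = 0; y < 8; y++) col[y] = m[y][x];
        hadamard8(col);
        for (int y = 0; y < 8; y++) m[y][x] = col[y];
    }
    int sum = 0;
    for (int y = 0; y < 8; y++)
        for (int x = 0; x < 8; x++)
            sum += std::abs(m[y][x]);
    return ((sum + 2) >> 2) - (dc >> 2); // sa8d normalization, minus DC
}

// 32x32 cost: |energy(source) - energy(recon)| accumulated over the
// 4x4 grid of 8x8 blocks (the .loopW/.loopH structure above).
static int psyCost_pp_32x32_ref(const uint16_t* src, intptr_t sstride,
                                const uint16_t* rec, intptr_t rstride)
{
    int total = 0;
    for (int y = 0; y < 32; y += 8)
        for (int x = 0; x < 32; x += 8)
            total += std::abs(psyEnergy8x8(src + y * sstride + x, sstride)
                            - psyEnergy8x8(rec + y * rstride + x, rstride));
    return total;
}
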
> +%else ; !HIGH_BIT_DEPTH
> +INIT_XMM sse4
> +cglobal psyCost_pp_32x32, 4, 9, 15
> +
> +    lea             r4, [3 * r1]
> +    lea             r8, [3 * r3]
> +    mova            m8, [hmul_8p]
> +    mova            m10, [pw_1]
> +    mova            m14, [pd_1]
> +    pxor            m13, m13
> +    mov             r7d, 4
> +.loopH:
> +    mov             r6d, 4
> +.loopW:
> +    pxor            m12, m12
> +    movddup         m0, [r0]
> +    movddup         m1, [r0 + r1]
> +    movddup         m2, [r0 + r1 * 2]
> +    movddup         m3, [r0 + r4]
> +    lea             r5, [r0 + r1 * 4]
> +    movddup         m4, [r5]
> +    movddup         m5, [r5 + r1]
> +    movddup         m6, [r5 + r1 * 2]
> +    movddup         m7, [r5 + r4]
> +
> +    pmaddubsw       m0, m8
> +    pmaddubsw       m1, m8
> +    pmaddubsw       m2, m8
> +    pmaddubsw       m3, m8
> +    pmaddubsw       m4, m8
> +    pmaddubsw       m5, m8
> +    pmaddubsw       m6, m8
> +    pmaddubsw       m7, m8
> +
> +    paddw           m11, m0, m1
> +    paddw           m11, m2
> +    paddw           m11, m3
> +    paddw           m11, m4
> +    paddw           m11, m5
> +    paddw           m11, m6
> +    paddw           m11, m7
> +
> +    pmaddwd         m11, m10
> +    psrldq          m9, m11, 4
> +    paddd           m11, m9
> +    psrld           m11, 2
> +
> +    HADAMARD8_2D_HMUL 0, 1, 2, 3, 4, 5, 6, 7, 9, 9
> +
> +    paddw           m0, m1
> +    paddw           m0, m2
> +    paddw           m0, m3
> +    HADDW m0, m1
> +
> +    paddd           m0, m14
> +    psrld           m0, 1
> +    psubd           m12, m0, m11
> +
> +    movddup         m0, [r2]
> +    movddup         m1, [r2 + r3]
> +    movddup         m2, [r2 + r3 * 2]
> +    movddup         m3, [r2 + r8]
> +    lea             r5, [r2 + r3 * 4]
> +    movddup         m4, [r5]
> +    movddup         m5, [r5 + r3]
> +    movddup         m6, [r5 + r3 * 2]
> +    movddup         m7, [r5 + r8]
> +
> +    pmaddubsw       m0, m8
> +    pmaddubsw       m1, m8
> +    pmaddubsw       m2, m8
> +    pmaddubsw       m3, m8
> +    pmaddubsw       m4, m8
> +    pmaddubsw       m5, m8
> +    pmaddubsw       m6, m8
> +    pmaddubsw       m7, m8
> +
> +    paddw           m11, m0, m1
> +    paddw           m11, m2
> +    paddw           m11, m3
> +    paddw           m11, m4
> +    paddw           m11, m5
> +    paddw           m11, m6
> +    paddw           m11, m7
> +
> +    pmaddwd         m11, m10
> +    psrldq          m9, m11, 4
> +    paddd           m11, m9
> +    psrld           m11, 2
> +
> +    HADAMARD8_2D_HMUL 0, 1, 2, 3, 4, 5, 6, 7, 9, 9
> +
> +    paddw           m0, m1
> +    paddw           m0, m2
> +    paddw           m0, m3
> +    HADDW m0, m1
> +
> +    paddd           m0, m14
> +    psrld           m0, 1
> +    psubd           m0, m11
> +    psubd           m12, m0
> +    pabsd           m0, m12
> +    paddd           m13, m0
> +    add             r0, 8
> +    add             r2, 8
> +    dec             r6d
> +    jnz             .loopW
> +    lea             r0, [r0 + r1 * 8 - 32]
> +    lea             r2, [r2 + r3 * 8 - 32]
> +    dec             r7d
> +    jnz             .loopH
> +    movd            eax, m13
> +    RET
> +%endif ; HIGH_BIT_DEPTH
> +%endif
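
[One note on the 8-bit path for future readers: the movddup + pmaddubsw-against-hmul_8p pairing at the top of .loopW folds the first Hadamard butterfly stage into the load. Duplicating the 8 source bytes into both xmm halves and multiplying by hmul_8p (eight +1 bytes, then alternating +1/-1) yields the pairwise sums in the low words and the pairwise differences in the high words in a single instruction. A small intrinsics sketch of the idiom, with my own names, not taken from the patch:]

#include <immintrin.h>
#include <cstdint>
#include <cstring>

// One row's worth of the movddup/pmaddubsw idiom (needs SSSE3).
static __m128i firstHadamardStage(const uint8_t* row)
{
    const __m128i hmul_8p = _mm_setr_epi8(1, 1, 1, 1, 1, 1, 1, 1,
                                          1, -1, 1, -1, 1, -1, 1, -1);
    int64_t bits;
    std::memcpy(&bits, row, 8);              // unaligned 8-byte load
    __m128i pix = _mm_set1_epi64x(bits);     // movddup: both halves = row
    // pmaddubsw: unsigned pix * signed hmul_8p, adjacent pairs summed.
    // Low 4 words: p0+p1, p2+p3, p4+p5, p6+p7; high 4: p0-p1, ..., p6-p7.
    return _mm_maddubs_epi16(pix, hmul_8p);
}
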
> diff -r f255e8d06423 -r e93ef9322ba1 source/common/x86/pixel.h
> --- a/source/common/x86/pixel.h	Fri Jan 02 18:22:38 2015 +0530
> +++ b/source/common/x86/pixel.h	Mon Jan 05 10:34:14 2015 +0530
> @@ -221,6 +221,7 @@
>  int x265_psyCost_pp_4x4_sse4(const pixel* source, intptr_t sstride, const pixel* recon, intptr_t rstride);
>  int x265_psyCost_pp_8x8_sse4(const pixel* source, intptr_t sstride, const pixel* recon, intptr_t rstride);
>  int x265_psyCost_pp_16x16_sse4(const pixel* source, intptr_t sstride, const pixel* recon, intptr_t rstride);
> +int x265_psyCost_pp_32x32_sse4(const pixel* source, intptr_t sstride, const pixel* recon, intptr_t rstride);
>  
>  #undef DECL_PIXELS
>  #undef DECL_HEVC_SSD
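
[Once the two table assignments above land, callers reach this kernel through the primitives table rather than by symbol name; roughly, using the psy_cost_pp field and BLOCK_32x32 index shown in the hunks above, with placeholder buffer and stride names:]

// Hedged usage sketch; fencPix/reconPix and the strides are placeholders.
int cost = primitives.psy_cost_pp[BLOCK_32x32](fencPix, fencStride,
                                               reconPix, reconStride);
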

-- 
Steve Borho

