[x265] [PATCH] asm: pixelsub_ps routine for all block sizes

Steve Borho steve at borho.org
Tue Nov 12 03:14:27 CET 2013


On Mon, Nov 11, 2013 at 3:47 PM, <murugan at multicorewareinc.com> wrote:

> # HG changeset patch
> # User Murugan Vairavel <murugan at multicorewareinc.com>
> # Date 1384206405 -19800
> #      Tue Nov 12 03:16:45 2013 +0530
> # Node ID 85749f42e3fa7c03ce29903d1457dc0ed873c120
> # Parent  9642b5b6500b5553ab3ce70a360aaaadad5d7234
> asm: pixelsub_ps routine for all block sizes
>

I have this patch sitting in my patch queue, waiting for the follow-up
change to asm-primitives.cpp (and the other affected files) that will make
the encoder actually use these new functions and remove the old ones.
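
For reference, each of these routines computes dest[x] = (int16_t)src0[x] -
(int16_t)src1[x] for its block, with the destination stride given in
int16_t units (hence the 'add r1, r1' at the top of every function). The
missing hookup is a few lines per cpuid level in asm-primitives.cpp; a
rough sketch, assuming the luma_sub_ps primitive table and LUMA_* partition
enums (those names are my assumption, not part of this patch):

    // sketch only: wire the SSE4 routines into the primitive table
    #if !HIGH_BIT_DEPTH
    if (cpuMask & X265_CPU_SSE4)
    {
        // one assignment per block size instantiated in pixel-a.asm
        p.luma_sub_ps[LUMA_4x4]   = x265_pixel_sub_ps_4x4_sse4;
        p.luma_sub_ps[LUMA_8x8]   = x265_pixel_sub_ps_8x8_sse4;
        p.luma_sub_ps[LUMA_16x16] = x265_pixel_sub_ps_16x16_sse4;
        // ... and so on for the remaining sizes ...
    }
    #endif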


> diff -r 9642b5b6500b -r 85749f42e3fa source/common/x86/pixel-a.asm
> --- a/source/common/x86/pixel-a.asm     Mon Nov 11 17:41:32 2013 +0800
> +++ b/source/common/x86/pixel-a.asm     Tue Nov 12 03:16:45 2013 +0530
> @@ -5284,3 +5284,1060 @@
>      jl .loop
>      movifnidn eax, r0d
>      RET
> +
> +;-----------------------------------------------------------------------------
> +; void pixel_sub_ps_c_2x4(int16_t *dest, intptr_t deststride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
> +;-----------------------------------------------------------------------------
> +INIT_XMM sse4
> +cglobal pixel_sub_ps_2x4, 6, 6, 4, dest, deststride, src0, src1, srcstride0, srcstride1
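> +; Two rows are packed into one xmm register before widening: movd picks
> +; up row 0, pinsrw drops row 1's two pixels into word slot 2, and
> +; pmovzxbw then zero-extends the packed bytes to words, so one psubw
> +; covers two rows and movd/pextrd store one row of words each.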
> +
> +add         r1,    r1
> +
> +movd        m0,    [r2]
> +pinsrw      m0,    [r2 + r4],    2
> +movd        m1,    [r2 + 2 * r4]
> +
> +movd        m2,    [r3]
> +pinsrw      m2,    [r3 + r5],    2
> +movd        m3,    [r3 + 2 * r5]
> +
> +lea         r2,    [r2 + 2 * r4]
> +lea         r3,    [r3 + 2 * r5]
> +
> +pinsrw      m1,    [r2 + r4],    2
> +pinsrw      m3,    [r3 + r5],    2
> +
> +pmovzxbw    m0,    m0
> +pmovzxbw    m1,    m1
> +pmovzxbw    m2,    m2
> +pmovzxbw    m3,    m3
> +
> +psubw       m0,    m2
> +psubw       m1,    m3
> +
> +movd      [r0],            m0
> +pextrd    [r0 + r1],       m0,    2
> +movd      [r0 + 2 * r1],    m1
> +
> +lea     r0,              [r0 + 2 * r1]
> +
> +pextrd    [r0 + r1],       m1,    2
> +
> +RET
> +
> +;-----------------------------------------------------------------------------
> +; void pixel_sub_ps_c_%1x%2(int16_t *dest, intptr_t deststride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
> +;-----------------------------------------------------------------------------
> +%macro PIXELSUB_PS_W2_H4 2
> +INIT_XMM sse4
> +cglobal pixel_sub_ps_%1x%2, 6, 7, 4, dest, deststride, src0, src1, srcstride0, srcstride1
> +
> +add    r1,     r1
> +mov    r6d,    %2/4
> +
> +.loop:
> +
> +    movd        m0,    [r2]
> +    pinsrw      m0,    [r2 + r4],    2
> +    movd        m1,    [r2 + 2 * r4]
> +
> +    movd        m2,    [r3]
> +    pinsrw      m2,    [r3 + r5],    2
> +    movd        m3,    [r3 + 2 * r5]
> +
> +    lea         r2,    [r2 + 2 * r4]
> +    lea         r3,    [r3 + 2 * r5]
> +
> +    pinsrw      m1,    [r2 + r4],    2
> +    pinsrw      m3,    [r3 + r5],    2
> +
> +    pmovzxbw    m0,    m0
> +    pmovzxbw    m1,    m1
> +    pmovzxbw    m2,    m2
> +    pmovzxbw    m3,    m3
> +
> +    psubw       m0,    m2
> +    psubw       m1,    m3
> +
> +    movd      [r0],            m0
> +    pextrd    [r0 + r1],       m0,    2
> +    movd      [r0 + 2 * r1],    m1
> +
> +    lea     r0,              [r0 + 2 * r1]
> +
> +    pextrd    [r0 + r1],       m1,    2
> +
> +    lea     r2,              [r2 + 2 * r4]
> +    lea     r3,              [r3 + 2 * r5]
> +    lea     r0,              [r0 + 2 * r1]
> +
> +    dec     r6d
> +
> +jnz    .loop
> +
> +RET
> +%endmacro
> +
> +PIXELSUB_PS_W2_H4 2, 8
> +
> +;-----------------------------------------------------------------------------
> +; void pixel_sub_ps_c_4x2(int16_t *dest, intptr_t deststride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
> +;-----------------------------------------------------------------------------
> +INIT_XMM sse4
> +cglobal pixel_sub_ps_4x2, 6, 6, 4, dest, deststride, src0, src1, srcstride0, srcstride1
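> +; For width 4, two rows (4 bytes each) are paired with punpckldq into
> +; the low 8 bytes of one register and widened with pmovzxbw, so a single
> +; psubw subtracts both rows; movlps/movhps then store one row each.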
> +
> +add          r1,    r1
> +
> +movd         m0,    [r2]
> +movd         m1,    [r3]
> +
> +movd         m2,    [r2 + r4]
> +movd         m3,    [r3 + r5]
> +
> +punpckldq    m0,    m2
> +punpckldq    m1,    m3
> +pmovzxbw     m0,    m0
> +pmovzxbw     m1,    m1
> +
> +psubw        m0,    m1
> +
> +movlps    [r0],         m0
> +movhps    [r0 + r1],    m0
> +
> +RET
> +
> +;-----------------------------------------------------------------------------
> +; void pixel_sub_ps_c_4x4(int16_t *dest, intptr_t deststride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
> +;-----------------------------------------------------------------------------
> +INIT_XMM sse4
> +cglobal pixel_sub_ps_4x4, 6, 6, 8, dest, deststride, src0, src1, srcstride0, srcstride1
> +
> +add          r1,    r1
> +
> +movd         m0,    [r2]
> +movd         m1,    [r3]
> +
> +movd         m2,    [r2 + r4]
> +movd         m3,    [r3 + r5]
> +
> +movd         m4,    [r2 + 2 * r4]
> +movd         m5,    [r3 + 2 * r5]
> +
> +lea          r2,    [r2 + 2 * r4]
> +lea          r3,    [r3 + 2 * r5]
> +
> +movd         m6,    [r2 + r4]
> +movd         m7,    [r3 + r5]
> +
> +punpckldq    m0,    m2
> +punpckldq    m1,    m3
> +punpckldq    m4,    m6
> +punpckldq    m5,    m7
> +
> +pmovzxbw     m0,    m0
> +pmovzxbw     m1,    m1
> +pmovzxbw     m4,    m4
> +pmovzxbw     m5,    m5
> +
> +psubw        m0,    m1
> +psubw        m4,    m5
> +
> +movlps    [r0],             m0
> +movhps    [r0 + r1],        m0
> +movlps    [r0 + 2 * r1],    m4
> +
> +lea       r0,               [r0 + 2 * r1]
> +
> +movhps    [r0 + r1],        m4
> +
> +RET
> +
> +;-----------------------------------------------------------------------------
> +; void pixel_sub_ps_c_%1x%2(int16_t *dest, intptr_t deststride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
> +;-----------------------------------------------------------------------------
> +%macro PIXELSUB_PS_W4_H4 2
> +INIT_XMM sse4
> +cglobal pixel_sub_ps_%1x%2, 6, 7, 8, dest, deststride, src0, src1, srcstride0, srcstride1
> +
> +add    r1,     r1
> +mov    r6d,    %2/4
> +
> +.loop:
> +
> +    movd         m0,    [r2]
> +    movd         m1,    [r3]
> +
> +    movd         m2,    [r2 + r4]
> +    movd         m3,    [r3 + r5]
> +
> +    movd         m4,    [r2 + 2 * r4]
> +    movd         m5,    [r3 + 2 * r5]
> +
> +    lea          r2,    [r2 + 2 * r4]
> +    lea          r3,    [r3 + 2 * r5]
> +
> +    movd         m6,    [r2 + r4]
> +    movd         m7,    [r3 + r5]
> +
> +    punpckldq    m0,    m2
> +    punpckldq    m1,    m3
> +    punpckldq    m4,    m6
> +    punpckldq    m5,    m7
> +
> +    pmovzxbw     m0,    m0
> +    pmovzxbw     m1,    m1
> +    pmovzxbw     m4,    m4
> +    pmovzxbw     m5,    m5
> +
> +    psubw        m0,    m1
> +    psubw        m4,    m5
> +
> +    movlps    [r0],             m0
> +    movhps    [r0 + r1],        m0
> +    movlps    [r0 + 2 * r1],    m4
> +
> +    lea       r0,               [r0 + 2 * r1]
> +
> +    movhps    [r0 + r1],        m4
> +
> +    lea       r2,               [r2 + 2 * r4]
> +    lea       r3,               [r3 + 2 * r5]
> +    lea       r0,               [r0 + 2 * r1]
> +
> +    dec       r6d
> +
> +jnz    .loop
> +
> +RET
> +%endmacro
> +
> +PIXELSUB_PS_W4_H4 4, 8
> +PIXELSUB_PS_W4_H4 4, 16
> +
> +;-----------------------------------------------------------------------------
> +; void pixel_sub_ps_c_%1x%2(int16_t *dest, intptr_t deststride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
> +;-----------------------------------------------------------------------------
> +%macro PIXELSUB_PS_W6_H4 2
> +INIT_XMM sse4
> +cglobal pixel_sub_ps_%1x%2, 6, 7, 8, dest, deststride, src0, src1, srcstride0, srcstride1
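> +; Width 6 loads 8 bytes per row (movh) but stores only 6 words: movh
> +; writes pixels 0-3 and pextrd writes pixels 4-5 at offset +8, so the
> +; two over-read bytes are never stored.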
> +
> +add    r1,     r1
> +mov    r6d,    %2/4
> +
> +.loop:
> +
> +    movh        m0,    [r2]
> +    movh        m1,    [r3]
> +
> +    movh        m2,    [r2 + r4]
> +    movh        m3,    [r3 + r5]
> +
> +    movh        m4,    [r2 + 2 * r4]
> +    movh        m5,    [r3 + 2 * r5]
> +
> +    lea         r2,    [r2 + 2 * r4]
> +    lea         r3,    [r3 + 2 * r5]
> +
> +    movh        m6,    [r2 + r4]
> +    movh        m7,    [r3 + r5]
> +
> +    pmovzxbw    m0,    m0
> +    pmovzxbw    m1,    m1
> +    pmovzxbw    m2,    m2
> +    pmovzxbw    m3,    m3
> +    pmovzxbw    m4,    m4
> +    pmovzxbw    m5,    m5
> +    pmovzxbw    m6,    m6
> +    pmovzxbw    m7,    m7
> +
> +    psubw       m0,    m1
> +    psubw       m2,    m3
> +    psubw       m4,    m5
> +    psubw       m6,    m7
> +
> +    movh      [r0],                 m0
> +    pextrd    [r0 + 8],             m0,    2
> +    movh      [r0 + r1],            m2
> +    pextrd    [r0 + r1 + 8],        m2,    2
> +    movh      [r0 + 2 * r1],         m4
> +    pextrd    [r0 + 2 * r1 + 8],    m4,    2
> +
> +    lea       r0,                   [r0 + 2 * r1]
> +
> +    movh      [r0 + r1],            m6
> +    pextrd    [r0 + r1 + 8],        m6,    2
> +
> +    lea       r2,                   [r2 + 2 * r4]
> +    lea       r3,                   [r3 + 2 * r5]
> +    lea       r0,                   [r0 + 2 * r1]
> +
> +    dec     r6d
> +
> +jnz    .loop
> +
> +RET
> +%endmacro
> +
> +PIXELSUB_PS_W6_H4 6, 8
> +
> +;-----------------------------------------------------------------------------
> +; void pixel_sub_ps_c_8x2(int16_t *dest, intptr_t deststride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
> +;-----------------------------------------------------------------------------
> +INIT_XMM sse4
> +cglobal pixel_sub_ps_8x2, 6, 6, 4, dest, deststride, src0, src1, srcstride0, srcstride1
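> +; Full 8-pixel rows: one movh load and one pmovzxbw per row, then a
> +; single psubw and one 16-byte store per row.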
> +
> +add         r1,    r1
> +
> +movh        m0,    [r2]
> +movh        m1,    [r3]
> +pmovzxbw    m0,    m0
> +pmovzxbw    m1,    m1
> +
> +movh        m2,    [r2 + r4]
> +movh        m3,    [r3 + r5]
> +pmovzxbw    m2,    m2
> +pmovzxbw    m3,    m3
> +
> +psubw       m0,    m1
> +psubw       m2,    m3
> +
> +movu    [r0],         m0
> +movu    [r0 + r1],    m2
> +
> +RET
> +
> +;-----------------------------------------------------------------------------
> +; void pixel_sub_ps_c_8x4(int16_t *dest, intptr_t deststride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
> +;-----------------------------------------------------------------------------
> +INIT_XMM sse4
> +cglobal pixel_sub_ps_8x4, 6, 6, 8, dest, deststride, src0, src1, srcstride0, srcstride1
> +
> +add         r1,    r1
> +
> +movh        m0,    [r2]
> +movh        m1,    [r3]
> +pmovzxbw    m0,    m0
> +pmovzxbw    m1,    m1
> +
> +movh        m2,    [r2 + r4]
> +movh        m3,    [r3 + r5]
> +pmovzxbw    m2,    m2
> +pmovzxbw    m3,    m3
> +
> +movh        m4,    [r2 + 2 * r4]
> +movh        m5,    [r3 + 2 * r5]
> +pmovzxbw    m4,    m4
> +pmovzxbw    m5,    m5
> +
> +psubw       m0,    m1
> +psubw       m2,    m3
> +psubw       m4,    m5
> +
> +lea         r2,    [r2 + 2 * r4]
> +lea         r3,    [r3 + 2 * r5]
> +
> +movh        m6,    [r2 + r4]
> +movh        m7,    [r3 + r5]
> +pmovzxbw    m6,    m6
> +pmovzxbw    m7,    m7
> +
> +psubw       m6,    m7
> +
> +movu    [r0],             m0
> +movu    [r0 + r1],        m2
> +movu    [r0 + 2 * r1],    m4
> +
> +lea     r0,               [r0 + 2 * r1]
> +
> +movu    [r0 + r1],        m6
> +
> +RET
> +
> +;-----------------------------------------------------------------------------
> +; void pixel_sub_ps_c_8x6(int16_t *dest, intptr_t deststride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
> +;-----------------------------------------------------------------------------
> +INIT_XMM sse4
> +cglobal pixel_sub_ps_8x6, 6, 6, 8, dest, deststride, src0, src1, srcstride0, srcstride1
> +
> +add         r1,    r1
> +
> +movh        m0,    [r2]
> +movh        m1,    [r3]
> +pmovzxbw    m0,    m0
> +pmovzxbw    m1,    m1
> +
> +movh        m2,    [r2 + r4]
> +movh        m3,    [r3 + r5]
> +pmovzxbw    m2,    m2
> +pmovzxbw    m3,    m3
> +
> +movh        m4,    [r2 + 2 * r4]
> +movh        m5,    [r3 + 2 * r5]
> +pmovzxbw    m4,    m4
> +pmovzxbw    m5,    m5
> +
> +psubw       m0,    m1
> +psubw       m2,    m3
> +psubw       m4,    m5
> +
> +lea         r2,    [r2 + 2 * r4]
> +lea         r3,    [r3 + 2 * r5]
> +
> +movh        m6,    [r2 + r4]
> +movh        m7,    [r3 + r5]
> +pmovzxbw    m6,    m6
> +pmovzxbw    m7,    m7
> +
> +movh        m1,    [r2 + 2 * r4]
> +movh        m3,    [r3 + 2 * r5]
> +pmovzxbw    m1,    m1
> +pmovzxbw    m3,    m3
> +
> +psubw       m6,    m7
> +psubw       m1,    m3
> +
> +lea         r2,    [r2 + 2 * r4]
> +lea         r3,    [r3 + 2 * r5]
> +
> +movh        m3,    [r2 + r4]
> +movh        m5,    [r3 + r5]
> +pmovzxbw    m3,    m3
> +pmovzxbw    m5,    m5
> +
> +psubw       m3,     m5
> +
> +movu    [r0],             m0
> +movu    [r0 + r1],        m2
> +movu    [r0 + 2 * r1],    m4
> +
> +lea     r0,               [r0 + 2 * r1]
> +
> +movu    [r0 + r1],        m6
> +movu    [r0 + 2 * r1],    m1
> +
> +lea     r0,               [r0 + 2 * r1]
> +
> +movu    [r0 + r1],        m3
> +
> +RET
> +
> +;-----------------------------------------------------------------------------
> +; void pixel_sub_ps_c_%1x%2(int16_t *dest, intptr_t deststride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
> +;-----------------------------------------------------------------------------
> +%macro PIXELSUB_PS_W8_H4 2
> +INIT_XMM sse4
> +cglobal pixel_sub_ps_%1x%2, 6, 7, 8, dest, deststride, src0, src1, srcstride0, srcstride1
> +
> +add    r1,     r1
> +mov    r6d,    %2/4
> +
> +.loop:
> +
> +    movh        m0,    [r2]
> +    movh        m1,    [r3]
> +    pmovzxbw    m0,    m0
> +    pmovzxbw    m1,    m1
> +
> +    movh        m2,    [r2 + r4]
> +    movh        m3,    [r3 + r5]
> +    pmovzxbw    m2,    m2
> +    pmovzxbw    m3,    m3
> +
> +    movh        m4,    [r2 + 2 * r4]
> +    movh        m5,    [r3 + 2 * r5]
> +    pmovzxbw    m4,    m4
> +    pmovzxbw    m5,    m5
> +
> +    psubw       m0,    m1
> +    psubw       m2,    m3
> +    psubw       m4,    m5
> +
> +    lea         r2,    [r2 + 2 * r4]
> +    lea         r3,    [r3 + 2 * r5]
> +
> +    movh        m6,    [r2 + r4]
> +    movh        m7,    [r3 + r5]
> +    pmovzxbw    m6,    m6
> +    pmovzxbw    m7,    m7
> +
> +    psubw       m6,    m7
> +
> +    movu    [r0],             m0
> +    movu    [r0 + r1],        m2
> +    movu    [r0 + 2 * r1],    m4
> +
> +    lea     r0,               [r0 + 2 * r1]
> +
> +    movu    [r0 + r1],        m6
> +
> +    lea     r2,               [r2 + 2 * r4]
> +    lea     r3,               [r3 + 2 * r5]
> +    lea     r0,               [r0 + 2 * r1]
> +
> +    dec     r6d
> +
> +jnz    .loop
> +
> +RET
> +%endmacro
> +
> +PIXELSUB_PS_W8_H4 8, 8
> +PIXELSUB_PS_W8_H4 8, 16
> +PIXELSUB_PS_W8_H4 8, 32
> +
> +;-----------------------------------------------------------------------------
> +; void pixel_sub_ps_c_%1x%2(int16_t *dest, intptr_t deststride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
> +;-----------------------------------------------------------------------------
> +%macro PIXELSUB_PS_W12_H4 2
> +INIT_XMM sse4
> +cglobal pixel_sub_ps_%1x%2, 6, 7, 6, dest, deststride, src0, src1, srcstride0, srcstride1
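> +; Width 12 loads 16 bytes per row: pixels 0-7 are widened directly with
> +; pmovzxbw, while punpckhdq packs pixels 8-11 of two consecutive rows
> +; into one register so the tails share one pmovzxbw/psubw and are stored
> +; with movlps/movhps at offset +16.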
> +
> +add    r1,     r1
> +mov    r6d,    %2/4
> +
> +.loop:
> +
> +    movu        m0,    [r2]
> +    movu        m1,    [r3]
> +    movu        m2,    [r2 + r4]
> +    movu        m3,    [r3 + r5]
> +
> +    mova        m4,    m0
> +    mova        m5,    m1
> +    punpckhdq   m4,    m2
> +    punpckhdq   m5,    m3
> +
> +    pmovzxbw    m0,    m0
> +    pmovzxbw    m1,    m1
> +    pmovzxbw    m2,    m2
> +    pmovzxbw    m3,    m3
> +    pmovzxbw    m4,    m4
> +    pmovzxbw    m5,    m5
> +
> +    psubw       m0,    m1
> +    psubw       m2,    m3
> +    psubw       m4,    m5
> +
> +    movu      [r0],              m0
> +    movlps    [r0 + 16],         m4
> +    movu      [r0 + r1],         m2
> +    movhps    [r0 + r1 + 16],    m4
> +
> +    movu      m0,    [r2 + 2 * r4]
> +    movu      m1,    [r3 + 2 * r5]
> +
> +    lea       r2,    [r2 + 2 * r4]
> +    lea       r3,    [r3 + 2 * r5]
> +
> +    movu      m2,    [r2 + r4]
> +    movu      m3,    [r3 + r5]
> +
> +    mova         m4,    m0
> +    mova         m5,    m1
> +    punpckhdq    m4,    m2
> +    punpckhdq    m5,    m3
> +
> +    pmovzxbw     m0,    m0
> +    pmovzxbw     m1,    m1
> +    pmovzxbw     m2,    m2
> +    pmovzxbw     m3,    m3
> +    pmovzxbw     m4,    m4
> +    pmovzxbw     m5,    m5
> +
> +    psubw        m0,    m1
> +    psubw        m2,    m3
> +    psubw        m4,    m5
> +
> +    movu      [r0 + 2 * r1],         m0
> +    movlps    [r0 + 2 * r1 + 16],    m4
> +
> +    lea       r0,                    [r0 + 2 * r1]
> +
> +    movu      [r0 + r1],             m2
> +    movhps    [r0 + r1 + 16],        m4
> +
> +    lea       r2,                    [r2 + 2 * r4]
> +    lea       r3,                    [r3 + 2 * r5]
> +    lea       r0,                    [r0 + 2 * r1]
> +
> +    dec    r6d
> +
> +jnz    .loop
> +
> +RET
> +%endmacro
> +
> +PIXELSUB_PS_W12_H4 12, 16
> +
> +;-----------------------------------------------------------------------------
> +; void pixel_sub_ps_c_%1x%2(int16_t *dest, intptr_t deststride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
> +;-----------------------------------------------------------------------------
> +%macro PIXELSUB_PS_W16_H4 2
> +INIT_XMM sse4
> +cglobal pixel_sub_ps_%1x%2, 6, 7, 7, dest, deststride, src0, src1, srcstride0, srcstride1
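> +; Full 16-byte rows: pmovzxbw widens the low 8 pixels and punpckhbw
> +; against the zeroed m6 widens the high 8, so each row costs two psubw
> +; and two 16-byte stores.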
> +
> +add    r1,     r1
> +mov    r6d,    %2/4
> +pxor   m6,     m6
> +
> +.loop:
> +
> +    movu         m1,    [r2]
> +    pmovzxbw     m0,    m1
> +    punpckhbw    m1,    m6
> +    movu         m3,    [r3]
> +    pmovzxbw     m2,    m3
> +    punpckhbw    m3,    m6
> +
> +    psubw        m0,    m2
> +    psubw        m1,    m3
> +
> +    movu         m5,    [r2 + r4]
> +    pmovzxbw     m4,    m5
> +    punpckhbw    m5,    m6
> +    movu         m3,    [r3 + r5]
> +    pmovzxbw     m2,    m3
> +    punpckhbw    m3,    m6
> +
> +    psubw        m4,    m2
> +    psubw        m5,    m3
> +
> +    movu    [r0],              m0
> +    movu    [r0 + 16],         m1
> +    movu    [r0 + r1],         m4
> +    movu    [r0 + r1 + 16],    m5
> +
> +    movu         m1,    [r2 + 2 * r4]
> +    pmovzxbw     m0,    m1
> +    punpckhbw    m1,    m6
> +    movu         m3,    [r3 + 2 * r5]
> +    pmovzxbw     m2,    m3
> +    punpckhbw    m3,    m6
> +
> +    lea          r2,    [r2 + 2 * r4]
> +    lea          r3,    [r3 + 2 * r5]
> +
> +    psubw        m0,    m2
> +    psubw        m1,    m3
> +
> +    movu         m5,    [r2 + r4]
> +    pmovzxbw     m4,    m5
> +    punpckhbw    m5,    m6
> +    movu         m3,    [r3 + r5]
> +    pmovzxbw     m2,    m3
> +    punpckhbw    m3,    m6
> +
> +    psubw        m4,    m2
> +    psubw        m5,    m3
> +
> +    movu    [r0 + 2 * r1],         m0
> +    movu    [r0 + 2 * r1 + 16],    m1
> +
> +    lea     r0,                    [r0 + 2 * r1]
> +
> +    movu    [r0 + r1],             m4
> +    movu    [r0 + r1 + 16],        m5
> +
> +    lea     r2,                    [r2 + 2 * r4]
> +    lea     r3,                    [r3 + 2 * r5]
> +    lea     r0,                    [r0 + 2 * r1]
> +
> +    dec    r6d
> +
> +jnz    .loop
> +
> +RET
> +%endmacro
> +
> +PIXELSUB_PS_W16_H4 16, 4
> +PIXELSUB_PS_W16_H4 16, 8
> +PIXELSUB_PS_W16_H4 16, 12
> +PIXELSUB_PS_W16_H4 16, 16
> +PIXELSUB_PS_W16_H4 16, 32
> +PIXELSUB_PS_W16_H4 16, 64
> +
> +;-----------------------------------------------------------------------------
> +; void pixel_sub_ps_c_%1x%2(int16_t *dest, intptr_t deststride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
> +;-----------------------------------------------------------------------------
> +%macro PIXELSUB_PS_W24_H2 2
> +INIT_XMM sse4
> +cglobal pixel_sub_ps_%1x%2, 6, 7, 7, dest, deststride, src0, src1, srcstride0, srcstride1
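> +; Width 24 is split 16 + 8 per row: a movu load widened as in the
> +; 16-wide case (m6 holds the zero for punpckhbw) plus a movh for the
> +; last 8 pixels, giving three psubw and three stores per row.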
> +
> +add    r1,     r1
> +mov    r6d,    %2/2
> +pxor   m6,     m6
> +
> +.loop:
> +
> +    movu         m1,    [r2]
> +    pmovzxbw     m0,    m1
> +    punpckhbw    m1,    m6
> +    movh         m2,    [r2 + 16]
> +    pmovzxbw     m2,    m2
> +    movu         m4,    [r3]
> +    pmovzxbw     m3,    m4
> +    punpckhbw    m4,    m6
> +    movh         m5,    [r3 + 16]
> +    pmovzxbw     m5,    m5
> +
> +    psubw        m0,    m3
> +    psubw        m1,    m4
> +    psubw        m2,    m5
> +
> +    movu    [r0],         m0
> +    movu    [r0 + 16],    m1
> +    movu    [r0 + 32],    m2
> +
> +    movu         m1,    [r2 + r4]
> +    pmovzxbw     m0,    m1
> +    punpckhbw    m1,    m6
> +    movh         m2,    [r2 + r4 + 16]
> +    pmovzxbw     m2,    m2
> +    movu         m4,    [r3 + r5]
> +    pmovzxbw     m3,    m4
> +    punpckhbw    m4,    m6
> +    movh         m5,    [r3 + r5 + 16]
> +    pmovzxbw     m5,    m5
> +
> +    psubw        m0,    m3
> +    psubw        m1,    m4
> +    psubw        m2,    m5
> +
> +    movu    [r0 + r1],         m0
> +    movu    [r0 + r1 + 16],    m1
> +    movu    [r0 + r1 + 32],    m2
> +
> +    lea    r2,    [r2 + 2 * r4]
> +    lea    r3,    [r3 + 2 * r5]
> +    lea    r0,    [r0 + 2 * r1]
> +
> +    dec    r6d
> +
> +jnz    .loop
> +
> +RET
> +%endmacro
> +
> +PIXELSUB_PS_W24_H2 24, 32
> +
> +;-----------------------------------------------------------------------------
> +; void pixel_sub_ps_c_%1x%2(int16_t *dest, intptr_t deststride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
> +;-----------------------------------------------------------------------------
> +%macro PIXELSUB_PS_W32_H2 2
> +INIT_XMM sse4
> +cglobal pixel_sub_ps_%1x%2, 6, 7, 8, dest, deststride, src0, src1, srcstride0, srcstride1
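> +; Width 32 uses four 8-byte loads per source row, each widened with
> +; pmovzxbw, giving four psubw and four 16-byte stores per row.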
> +
> +add    r1,     r1
> +mov    r6d,    %2/2
> +
> +.loop:
> +
> +    movh        m0,    [r2]
> +    movh        m1,    [r2 + 8]
> +    movh        m2,    [r2 + 16]
> +    movh        m3,    [r2 + 24]
> +    movh        m4,    [r3]
> +    movh        m5,    [r3 + 8]
> +    movh        m6,    [r3 + 16]
> +    movh        m7,    [r3 + 24]
> +
> +    pmovzxbw    m0,    m0
> +    pmovzxbw    m1,    m1
> +    pmovzxbw    m2,    m2
> +    pmovzxbw    m3,    m3
> +    pmovzxbw    m4,    m4
> +    pmovzxbw    m5,    m5
> +    pmovzxbw    m6,    m6
> +    pmovzxbw    m7,    m7
> +
> +    psubw       m0,    m4
> +    psubw       m1,    m5
> +    psubw       m2,    m6
> +    psubw       m3,    m7
> +
> +    movu    [r0],         m0
> +    movu    [r0 + 16],    m1
> +    movu    [r0 + 32],    m2
> +    movu    [r0 + 48],    m3
> +
> +    movh        m0,    [r2 + r4]
> +    movh        m1,    [r2 + r4 + 8]
> +    movh        m2,    [r2 + r4 + 16]
> +    movh        m3,    [r2 + r4 + 24]
> +    movh        m4,    [r3 + r5]
> +    movh        m5,    [r3 + r5 + 8]
> +    movh        m6,    [r3 + r5 + 16]
> +    movh        m7,    [r3 + r5 + 24]
> +
> +    pmovzxbw    m0,    m0
> +    pmovzxbw    m1,    m1
> +    pmovzxbw    m2,    m2
> +    pmovzxbw    m3,    m3
> +    pmovzxbw    m4,    m4
> +    pmovzxbw    m5,    m5
> +    pmovzxbw    m6,    m6
> +    pmovzxbw    m7,    m7
> +
> +    psubw       m0,    m4
> +    psubw       m1,    m5
> +    psubw       m2,    m6
> +    psubw       m3,    m7
> +
> +    movu    [r0 + r1],         m0
> +    movu    [r0 + r1 + 16],    m1
> +    movu    [r0 + r1 + 32],    m2
> +    movu    [r0 + r1 + 48],    m3
> +
> +    lea    r2,    [r2 + 2 * r4]
> +    lea    r3,    [r3 + 2 * r5]
> +    lea    r0,    [r0 + 2 * r1]
> +
> +    dec    r6d
> +
> +jnz    .loop
> +
> +RET
> +%endmacro
> +
> +PIXELSUB_PS_W32_H2 32, 8
> +PIXELSUB_PS_W32_H2 32, 16
> +PIXELSUB_PS_W32_H2 32, 24
> +PIXELSUB_PS_W32_H2 32, 32
> +PIXELSUB_PS_W32_H2 32, 64
> +
> +;-----------------------------------------------------------------------------
> +; void pixel_sub_ps_c_%1x%2(int16_t *dest, intptr_t deststride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
> +;-----------------------------------------------------------------------------
> +%macro PIXELSUB_PS_W48_H2 2
> +INIT_XMM sse4
> +cglobal pixel_sub_ps_%1x%2, 6, 7, 7, dest, deststride, src0, src1, srcstride0, srcstride1
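> +; Width 48 processes three 16-byte segments per row, interleaving loads,
> +; widens, and stores; m6 stays zero for punpckhbw. Note r2 always steps
> +; by r4 and r3 by r5.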
> +
> +add    r1,     r1
> +mov    r6d,    %2/2
> +pxor   m6,     m6
> +
> +.loop:
> +
> +    movu         m1,    [r2]
> +    pmovzxbw     m0,    m1
> +    punpckhbw    m1,    m6
> +    movu         m3,    [r3]
> +    pmovzxbw     m2,    m3
> +    punpckhbw    m3,    m6
> +    movu         m5,    [r2 + 16]
> +    pmovzxbw     m4,    m5
> +    punpckhbw    m5,    m6
> +
> +    psubw        m0,    m2
> +    psubw        m1,    m3
> +
> +    movu    [r0],         m0
> +    movu    [r0 + 16],    m1
> +
> +    movu         m3,    [r3 + 16]
> +    pmovzxbw     m2,    m3
> +    punpckhbw    m3,    m6
> +
> +    psubw        m4,    m2
> +    psubw        m5,    m3
> +
> +    movu    [r0 + 32],    m4
> +    movu    [r0 + 48],    m5
> +
> +    movu         m1,    [r2 + 32]
> +    pmovzxbw     m0,    m1
> +    punpckhbw    m1,    m6
> +    movu         m3,    [r3 + 32]
> +    pmovzxbw     m2,    m3
> +    punpckhbw    m3,    m6
> +
> +    psubw        m0,    m2
> +    psubw        m1,    m3
> +
> +    movu    [r0 + 64],    m0
> +    movu    [r0 + 80],    m1
> +
> +    movu         m1,    [r2 + r4]
> +    pmovzxbw     m0,    m1
> +    punpckhbw    m1,    m6
> +    movu         m3,    [r3 + r5]
> +    pmovzxbw     m2,    m3
> +    punpckhbw    m3,    m6
> +    movu         m5,    [r2 + r4 + 16]
> +    pmovzxbw     m4,    m5
> +    punpckhbw    m5,    m6
> +
> +    psubw        m0,    m2
> +    psubw        m1,    m3
> +
> +    movu    [r0 + r1],         m0
> +    movu    [r0 + r1 + 16],    m1
> +
> +    movu         m3,    [r3 + r5 + 16]
> +    pmovzxbw     m2,    m3
> +    punpckhbw    m3,    m6
> +
> +    psubw        m4,    m2
> +    psubw        m5,    m3
> +
> +    movu    [r0 + r1 + 32],    m4
> +    movu    [r0 + r1 + 48],    m5
> +
> +    movu         m1,    [r2 + r4 + 32]
> +    pmovzxbw     m0,    m1
> +    punpckhbw    m1,    m6
> +    movu         m3,    [r3 + r5 + 32]
> +    pmovzxbw     m2,    m3
> +    punpckhbw    m3,    m6
> +
> +    psubw        m0,    m2
> +    psubw        m1,    m3
> +
> +    movu    [r0 + r1 + 64],    m0
> +    movu    [r0 + r1 + 80],    m1
> +
> +    lea     r2,                [r2 + 2 * r4]
> +    lea     r3,                [r3 + 2 * r5]
> +    lea     r0,                [r0 + 2 * r1]
> +
> +    dec    r6d
> +
> +jnz    .loop
> +
> +RET
> +%endmacro
> +
> +PIXELSUB_PS_W48_H2 48, 64
> +
> +;-----------------------------------------------------------------------------
> +; void pixel_sub_ps_c_%1x%2(int16_t *dest, intptr_t deststride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
> +;-----------------------------------------------------------------------------
> +%macro PIXELSUB_PS_W64_H2 2
> +INIT_XMM sse4
> +cglobal pixel_sub_ps_%1x%2, 6, 7, 7, dest, deststride, src0, src1, srcstride0, srcstride1
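> +; Width 64 rolls the register assignments from one 16-byte segment to
> +; the next so each new load overlaps the widen/subtract of the previous
> +; segment; m6 again holds the zero for punpckhbw.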
> +
> +add    r1,     r1
> +mov    r6d,    %2/2
> +pxor   m6,     m6
> +
> +.loop:
> +
> +    movu         m1,    [r2]
> +    pmovzxbw     m0,    m1
> +    punpckhbw    m1,    m6
> +    movu         m3,    [r3]
> +    pmovzxbw     m2,    m3
> +    punpckhbw    m3,    m6
> +    movu         m5,    [r2 + 16]
> +    pmovzxbw     m4,    m5
> +    punpckhbw    m5,    m6
> +
> +    psubw        m0,    m2
> +    psubw        m1,    m3
> +
> +    movu    [r0],         m0
> +    movu    [r0 + 16],    m1
> +
> +    movu         m1,    [r3 + 16]
> +    pmovzxbw     m0,    m1
> +    punpckhbw    m1,    m6
> +    movu         m3,    [r2 + 32]
> +    pmovzxbw     m2,    m3
> +    punpckhbw    m3,    m6
> +
> +    psubw        m4,    m0
> +    psubw        m5,    m1
> +
> +    movu    [r0 + 32],    m4
> +    movu    [r0 + 48],    m5
> +
> +    movu         m5,    [r3 + 32]
> +    pmovzxbw     m4,    m5
> +    punpckhbw    m5,    m6
> +    movu         m1,    [r2 + 48]
> +    pmovzxbw     m0,    m1
> +    punpckhbw    m1,    m6
> +
> +    psubw        m2,    m4
> +    psubw        m3,    m5
> +
> +    movu    [r0 + 64],    m2
> +    movu    [r0 + 80],    m3
> +
> +    movu         m3,    [r3 + 48]
> +    pmovzxbw     m2,    m3
> +    punpckhbw    m3,    m6
> +    movu         m5,    [r2 + r4]
> +    pmovzxbw     m4,    m5
> +    punpckhbw    m5,    m6
> +
> +    psubw        m0,    m2
> +    psubw        m1,    m3
> +
> +    movu    [r0 + 96],     m0
> +    movu    [r0 + 112],    m1
> +
> +    movu         m1,    [r3 + r5]
> +    pmovzxbw     m0,    m1
> +    punpckhbw    m1,    m6
> +    movu         m3,    [r2 + r4 + 16]
> +    pmovzxbw     m2,    m3
> +    punpckhbw    m3,    m6
> +
> +    psubw        m4,    m0
> +    psubw        m5,    m1
> +
> +    movu    [r0 + r1],         m4
> +    movu    [r0 + r1 + 16],    m5
> +
> +    movu         m5,    [r3 + r5 + 16]
> +    pmovzxbw     m4,    m5
> +    punpckhbw    m5,    m6
> +    movu         m1,    [r2 + r4 + 32]
> +    pmovzxbw     m0,    m1
> +    punpckhbw    m1,    m6
> +
> +    psubw        m2,    m4
> +    psubw        m3,    m5
> +
> +    movu    [r0 + r1 + 32],    m2
> +    movu    [r0 + r1 + 48],    m3
> +
> +    movu         m3,    [r3 + r5 + 32]
> +    pmovzxbw     m2,    m3
> +    punpckhbw    m3,    m6
> +    movu         m5,    [r2 + r4 + 48]
> +    pmovzxbw     m4,    m5
> +    punpckhbw    m5,    m6
> +
> +    psubw        m0,    m2
> +    psubw        m1,    m3
> +
> +    movu    [r0 + r1 + 64],    m0
> +    movu    [r0 + r1 + 80],    m1
> +
> +    movu         m1,    [r3 + r5 + 48]
> +    pmovzxbw     m0,    m1
> +    punpckhbw    m1,    m6
> +
> +    psubw        m4,    m0
> +    psubw        m5,    m1
> +
> +    movu    [r0 + r1 + 96],     m4
> +    movu    [r0 + r1 + 112],    m5
> +
> +    lea     r2,                 [r2 + 2 * r4]
> +    lea     r3,                 [r3 + 2 * r5]
> +    lea     r0,                 [r0 + 2 * r1]
> +
> +    dec    r6d
> +
> +jnz    .loop
> +
> +RET
> +%endmacro
> +
> +PIXELSUB_PS_W64_H2 64, 16
> +PIXELSUB_PS_W64_H2 64, 32
> +PIXELSUB_PS_W64_H2 64, 48
> +PIXELSUB_PS_W64_H2 64, 64



-- 
Steve Borho