[x265] [PATCH] asm: avx2 8bpp code for sub_ps[16x16](7.65x), sub_ps[32x32](8.37x), sub_ps[64x64](9.27x)

Rajesh Paulraj rajesh at multicorewareinc.com
Thu Mar 12 14:10:03 CET 2015


Please ignore this patch. I will remove the blank lines before %endrep and RET
and resend the patch.
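
For context: sub_ps computes the per-pixel difference of two 8-bit source
blocks and writes the result as 16-bit residuals, which is why the asm below
doubles the destination stride (add r1, r1) before storing. A minimal scalar
sketch of the primitive, written against the signature declared in pixel.h
(the loop body is my reading of that interface, not a copy of x265's C
reference in common/pixel.cpp):

    #include <stdint.h>

    typedef uint8_t pixel;   /* 8bpp build */

    /* illustrative scalar model; each stride is in elements of that pointer's type */
    static void sub_ps_c(int16_t *dest, intptr_t destride,
                         const pixel *src0, const pixel *src1,
                         intptr_t srcstride0, intptr_t srcstride1, int size)
    {
        for (int y = 0; y < size; y++)
        {
            for (int x = 0; x < size; x++)
                dest[x] = (int16_t)(src0[x] - src1[x]);
            dest += destride;
            src0 += srcstride0;
            src1 += srcstride1;
        }
    }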

On Thu, Mar 12, 2015 at 6:33 PM, <rajesh at multicorewareinc.com> wrote:

> # HG changeset patch
> # User Rajesh Paulraj<rajesh at multicorewareinc.com>
> # Date 1426165275 -19800
> #      Thu Mar 12 18:31:15 2015 +0530
> # Node ID 7d63cdb0d52b96c37ca59d1bcfe647ed1032158c
> # Parent  78f9c9fad129b89c6fe1ade3d59f3619bdf210b3
> asm: avx2 8bpp code for sub_ps[16x16](7.65x), sub_ps[32x32](8.37x),
>                         sub_ps[64x64](9.27x)
>
> diff -r 78f9c9fad129 -r 7d63cdb0d52b source/common/x86/asm-primitives.cpp
> --- a/source/common/x86/asm-primitives.cpp      Wed Mar 11 18:08:03 2015 +0530
> +++ b/source/common/x86/asm-primitives.cpp      Thu Mar 12 18:31:15 2015 +0530
> @@ -1419,6 +1419,10 @@
>          p.cu[BLOCK_32x32].add_ps = x265_pixel_add_ps_32x32_avx2;
>          p.cu[BLOCK_64x64].add_ps = x265_pixel_add_ps_64x64_avx2;
>
> +        p.cu[BLOCK_16x16].sub_ps = x265_pixel_sub_ps_16x16_avx2;
> +        p.cu[BLOCK_32x32].sub_ps = x265_pixel_sub_ps_32x32_avx2;
> +        p.cu[BLOCK_64x64].sub_ps = x265_pixel_sub_ps_64x64_avx2;
> +
>          p.pu[LUMA_16x4].pixelavg_pp = x265_pixel_avg_16x4_avx2;
>          p.pu[LUMA_16x8].pixelavg_pp = x265_pixel_avg_16x8_avx2;
>          p.pu[LUMA_16x12].pixelavg_pp = x265_pixel_avg_16x12_avx2;
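
(These table entries make the encoder dispatch sub_ps for 16x16, 32x32 and
64x64 CUs to the new AVX2 routines; the hunk sits alongside the add_ps AVX2
entries above, inside the block that only runs when the detected CPU
reports AVX2.)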
> diff -r 78f9c9fad129 -r 7d63cdb0d52b source/common/x86/pixel-util8.asm
> --- a/source/common/x86/pixel-util8.asm Wed Mar 11 18:08:03 2015 +0530
> +++ b/source/common/x86/pixel-util8.asm Thu Mar 12 18:31:15 2015 +0530
> @@ -4335,6 +4335,43 @@
>  PIXELSUB_PS_W16_H4 16, 32
>  %endif
>
>
> +;-----------------------------------------------------------------------------
> +; void pixel_sub_ps_16x16(int16_t *dest, intptr_t destride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
> +;-----------------------------------------------------------------------------
> +INIT_YMM avx2
> +cglobal pixel_sub_ps_16x16, 6, 6, 4, dest, deststride, src0, src1, srcstride0, srcstride1
> +    add         r1,     r1
> +
> +%rep 4
> +    pmovzxbw    m0,     [r2]
> +    pmovzxbw    m1,     [r3]
> +    pmovzxbw    m2,     [r2 + r4]
> +    pmovzxbw    m3,     [r3 + r5]
> +    lea         r2,     [r2 + r4 * 2]
> +    lea         r3,     [r3 + r5 * 2]
> +
> +    psubw       m0,     m1
> +    psubw       m2,     m3
> +
> +    movu        [r0],            m0
> +    movu        [r0 + r1],       m2
> +
> +    pmovzxbw    m0,     [r2]
> +    pmovzxbw    m1,     [r3]
> +    pmovzxbw    m2,     [r2 + r4]
> +    pmovzxbw    m3,     [r3 + r5]
> +
> +    psubw       m0,     m1
> +    psubw       m2,     m3
> +
> +    movu        [r0 + r1 * 2],   m0
> +    lea         r0,     [r0 + r1 * 2]
> +    movu        [r0 + r1],       m2
> +    lea         r0,     [r0 + r1 * 2]
> +    lea         r2,     [r2 + r4 * 2]
> +    lea         r3,     [r3 + r5 * 2]
> +%endrep
> +    RET
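
A note on the pattern above: pmovzxbw with a memory operand (vpmovzxbw
ymm, m128) loads 16 packed bytes and zero-extends them to 16 words, so a
single instruction both fetches a 16-pixel row and widens it; psubw then
does the 16-bit subtract and movu stores the 32-byte result. Each %rep
iteration covers four rows, so %rep 4 handles all 16. In intrinsics terms,
one row is equivalent to this sketch (my illustration, not code from the
patch):

    #include <immintrin.h>
    #include <stdint.h>

    /* one 16-pixel row: widen both 8-bit sources to words, subtract, store */
    static inline void sub_row16(int16_t *dst, const uint8_t *s0, const uint8_t *s1)
    {
        __m256i a = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i *)s0)); /* vpmovzxbw */
        __m256i b = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i *)s1));
        _mm256_storeu_si256((__m256i *)dst, _mm256_sub_epi16(a, b));            /* psubw + movu */
    }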
>
>
>  ;-----------------------------------------------------------------------------
>  ; void pixel_sub_ps_32x%2(int16_t *dest, intptr_t destride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
> @@ -4469,6 +4506,133 @@
>  PIXELSUB_PS_W32_H2 32, 64
>  %endif
>
>
> +;-----------------------------------------------------------------------------
> +; void pixel_sub_ps_32x32(int16_t *dest, intptr_t destride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
> +;-----------------------------------------------------------------------------
> +INIT_YMM avx2
> +cglobal pixel_sub_ps_32x32, 6, 6, 4, dest, deststride, src0, src1, srcstride0, srcstride1
> +    add         r1,     r1
> +
> +%rep 4
> +    pmovzxbw    m0,     [r2]
> +    pmovzxbw    m1,     [r2 + 16]
> +    pmovzxbw    m2,     [r3]
> +    pmovzxbw    m3,     [r3 + 16]
> +
> +    psubw       m0,     m2
> +    psubw       m1,     m3
> +
> +    movu        [r0],            m0
> +    movu        [r0 + 32],       m1
> +
> +    pmovzxbw    m0,     [r2 + r4]
> +    pmovzxbw    m1,     [r2 + r4 + 16]
> +    pmovzxbw    m2,     [r3 + r5]
> +    pmovzxbw    m3,     [r3 + r5 + 16]
> +
> +    psubw       m0,     m2
> +    psubw       m1,     m3
> +
> +    movu        [r0 + r1],       m0
> +    movu        [r0 + r1 + 32],  m1
> +
> +    lea         r2,     [r2 + r4]
> +    lea         r3,     [r3 + r5]
> +
> +    pmovzxbw    m0,     [r2 + r4]
> +    pmovzxbw    m1,     [r2 + r4 + 16]
> +    pmovzxbw    m2,     [r3 + r5]
> +    pmovzxbw    m3,     [r3 + r5 + 16]
> +
> +    psubw       m0,     m2
> +    psubw       m1,     m3
> +    lea         r0,     [r0 + r1 * 2]
> +
> +    movu        [r0],            m0
> +    movu        [r0 + 32],       m1
> +
> +    lea         r2,     [r2 + r4]
> +    lea         r3,     [r3 + r5]
> +
> +    pmovzxbw    m0,     [r2 + r4]
> +    pmovzxbw    m1,     [r2 + r4 + 16]
> +    pmovzxbw    m2,     [r3 + r5]
> +    pmovzxbw    m3,     [r3 + r5 + 16]
> +
> +    psubw       m0,     m2
> +    psubw       m1,     m3
> +    lea         r0,     [r0 + r1]
> +
> +    movu        [r0],            m0
> +    movu        [r0 + 32],       m1
> +
> +    lea         r2,     [r2 + r4]
> +    lea         r3,     [r3 + r5]
> +
> +    pmovzxbw    m0,     [r2 + r4]
> +    pmovzxbw    m1,     [r2 + r4 + 16]
> +    pmovzxbw    m2,     [r3 + r5]
> +    pmovzxbw    m3,     [r3 + r5 + 16]
> +
> +    psubw       m0,     m2
> +    psubw       m1,     m3
> +    lea         r0,     [r0 + r1]
> +
> +    movu        [r0],            m0
> +    movu        [r0 + 32],       m1
> +
> +    lea         r2,     [r2 + r4]
> +    lea         r3,     [r3 + r5]
> +
> +    pmovzxbw    m0,     [r2 + r4]
> +    pmovzxbw    m1,     [r2 + r4 + 16]
> +    pmovzxbw    m2,     [r3 + r5]
> +    pmovzxbw    m3,     [r3 + r5 + 16]
> +
> +    psubw       m0,     m2
> +    psubw       m1,     m3
> +    lea         r0,     [r0 + r1]
> +
> +    movu        [r0],            m0
> +    movu        [r0 + 32],       m1
> +
> +    lea         r2,     [r2 + r4]
> +    lea         r3,     [r3 + r5]
> +
> +    pmovzxbw    m0,     [r2 + r4]
> +    pmovzxbw    m1,     [r2 + r4 + 16]
> +    pmovzxbw    m2,     [r3 + r5]
> +    pmovzxbw    m3,     [r3 + r5 + 16]
> +
> +    psubw       m0,     m2
> +    psubw       m1,     m3
> +    lea         r0,     [r0 + r1]
> +
> +    movu        [r0],            m0
> +    movu        [r0 + 32],       m1
> +
> +    lea         r2,     [r2 + r4]
> +    lea         r3,     [r3 + r5]
> +
> +    pmovzxbw    m0,     [r2 + r4]
> +    pmovzxbw    m1,     [r2 + r4 + 16]
> +    pmovzxbw    m2,     [r3 + r5]
> +    pmovzxbw    m3,     [r3 + r5 + 16]
> +
> +    psubw       m0,     m2
> +    psubw       m1,     m3
> +    lea         r0,     [r0 + r1]
> +
> +    movu        [r0],            m0
> +    movu        [r0 + 32],       m1
> +
> +    lea         r0,     [r0 + r1]
> +    lea         r2,     [r2 + r4 * 2]
> +    lea         r3,     [r3 + r5 * 2]
> +
> +%endrep
> +    RET
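
The pointer bookkeeping in this body is worth spelling out: each %rep
iteration covers eight rows (4 x 8 = 32), and from the third row on it
always loads at [r2 + r4] while bumping r2 by one stride afterwards, so the
base pointer trails the row being read by one stride. A C model of that
idiom (process_row, src, and stride are illustrative names, not patch code):

    #include <stdint.h>

    /* stand-in for "load 32 pixels, widen, subtract, store one row" */
    static void process_row(const uint8_t *row) { (void)row; }

    static void walk_rows_like_32x32(const uint8_t *src, intptr_t stride)
    {
        const uint8_t *base = src;
        process_row(base);                 /* row 0: [r2]      */
        process_row(base + stride);        /* row 1: [r2 + r4] */
        for (int k = 2; k < 8; k++)
        {
            base += stride;                /* lea r2, [r2 + r4] */
            process_row(base + stride);    /* row k: [r2 + r4]  */
        }
        /* the final lea r2, [r2 + r4 * 2] then lands base on row 8 */
    }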
>
>
>  ;-----------------------------------------------------------------------------
>  ; void pixel_sub_ps_64x%2(int16_t *dest, intptr_t destride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
> @@ -4688,6 +4852,112 @@
>  PIXELSUB_PS_W64_H2 64, 64
>  %endif
>
>
> +;-----------------------------------------------------------------------------
> +; void pixel_sub_ps_64x64(int16_t *dest, intptr_t destride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
> +;-----------------------------------------------------------------------------
> +INIT_YMM avx2
> +cglobal pixel_sub_ps_64x64, 6, 6, 8, dest, deststride, src0, src1, srcstride0, srcstride1
> +    add         r1,     r1
> +
> +%rep 16
> +    pmovzxbw    m0,     [r2]
> +    pmovzxbw    m1,     [r2 + 16]
> +    pmovzxbw    m2,     [r2 + 32]
> +    pmovzxbw    m3,     [r2 + 48]
> +
> +    pmovzxbw    m4,     [r3]
> +    pmovzxbw    m5,     [r3 + 16]
> +    pmovzxbw    m6,     [r3 + 32]
> +    pmovzxbw    m7,     [r3 + 48]
> +
> +    psubw       m0,     m4
> +    psubw       m1,     m5
> +    psubw       m2,     m6
> +    psubw       m3,     m7
> +
> +    movu        [r0],         m0
> +    movu        [r0 + 32],    m1
> +    movu        [r0 + 64],    m2
> +    movu        [r0 + 96],    m3
> +
> +    lea         r0,     [r0 + r1]
> +    lea         r2,     [r2 + r4]
> +    lea         r3,     [r3 + r5]
> +
> +    pmovzxbw    m0,     [r2]
> +    pmovzxbw    m1,     [r2 + 16]
> +    pmovzxbw    m2,     [r2 + 32]
> +    pmovzxbw    m3,     [r2 + 48]
> +
> +    pmovzxbw    m4,     [r3]
> +    pmovzxbw    m5,     [r3 + 16]
> +    pmovzxbw    m6,     [r3 + 32]
> +    pmovzxbw    m7,     [r3 + 48]
> +
> +    psubw       m0,     m4
> +    psubw       m1,     m5
> +    psubw       m2,     m6
> +    psubw       m3,     m7
> +
> +    movu        [r0],         m0
> +    movu        [r0 + 32],    m1
> +    movu        [r0 + 64],    m2
> +    movu        [r0 + 96],    m3
> +
> +    lea         r0,     [r0 + r1]
> +    lea         r2,     [r2 + r4]
> +    lea         r3,     [r3 + r5]
> +
> +    pmovzxbw    m0,     [r2]
> +    pmovzxbw    m1,     [r2 + 16]
> +    pmovzxbw    m2,     [r2 + 32]
> +    pmovzxbw    m3,     [r2 + 48]
> +
> +    pmovzxbw    m4,     [r3]
> +    pmovzxbw    m5,     [r3 + 16]
> +    pmovzxbw    m6,     [r3 + 32]
> +    pmovzxbw    m7,     [r3 + 48]
> +
> +    psubw       m0,     m4
> +    psubw       m1,     m5
> +    psubw       m2,     m6
> +    psubw       m3,     m7
> +
> +    movu        [r0],         m0
> +    movu        [r0 + 32],    m1
> +    movu        [r0 + 64],    m2
> +    movu        [r0 + 96],    m3
> +
> +    lea         r0,     [r0 + r1]
> +    lea         r2,     [r2 + r4]
> +    lea         r3,     [r3 + r5]
> +
> +    pmovzxbw    m0,     [r2]
> +    pmovzxbw    m1,     [r2 + 16]
> +    pmovzxbw    m2,     [r2 + 32]
> +    pmovzxbw    m3,     [r2 + 48]
> +
> +    pmovzxbw    m4,     [r3]
> +    pmovzxbw    m5,     [r3 + 16]
> +    pmovzxbw    m6,     [r3 + 32]
> +    pmovzxbw    m7,     [r3 + 48]
> +
> +    psubw       m0,     m4
> +    psubw       m1,     m5
> +    psubw       m2,     m6
> +    psubw       m3,     m7
> +
> +    movu        [r0],         m0
> +    movu        [r0 + 32],    m1
> +    movu        [r0 + 64],    m2
> +    movu        [r0 + 96],    m3
> +
> +    lea         r0,     [r0 + r1]
> +    lea         r2,     [r2 + r4]
> +    lea         r3,     [r3 + r5]
> +%endrep
> +
> +    RET
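
Row accounting for the 64x64 version: one row of 64 pixels needs four
16-byte pmovzxbw loads per source, which is why this routine asks cglobal
for eight vector registers (m0-m3 for src0, m4-m7 for src1) where the
narrower blocks needed only four. The body handles four rows per iteration,
so %rep 16 covers all 64.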
>
>
>  ;=============================================================================
>  ; variance
> diff -r 78f9c9fad129 -r 7d63cdb0d52b source/common/x86/pixel.h
> --- a/source/common/x86/pixel.h Wed Mar 11 18:08:03 2015 +0530
> +++ b/source/common/x86/pixel.h Thu Mar 12 18:31:15 2015 +0530
> @@ -255,6 +255,10 @@
>  void x265_pixel_add_ps_32x32_avx2(pixel* a, intptr_t dstride, const pixel* b0, const int16_t* b1, intptr_t sstride0, intptr_t sstride1);
>  void x265_pixel_add_ps_64x64_avx2(pixel* a, intptr_t dstride, const pixel* b0, const int16_t* b1, intptr_t sstride0, intptr_t sstride1);
>
> +void x265_pixel_sub_ps_16x16_avx2(int16_t* a, intptr_t dstride, const pixel* b0, const pixel* b1, intptr_t sstride0, intptr_t sstride1);
> +void x265_pixel_sub_ps_32x32_avx2(int16_t* a, intptr_t dstride, const pixel* b0, const pixel* b1, intptr_t sstride0, intptr_t sstride1);
> +void x265_pixel_sub_ps_64x64_avx2(int16_t* a, intptr_t dstride, const pixel* b0, const pixel* b1, intptr_t sstride0, intptr_t sstride1);
> +
>  #undef DECL_PIXELS
>  #undef DECL_HEVC_SSD
>  #undef DECL_X1
>
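
A quick standalone sanity check for one of these routines against the scalar
model at the top of this mail (illustrative only: the buffer sizes, flat
strides, and random inputs are my choices, and x265's own testbench in
source/test/pixelharness.cpp exercises sub_ps far more thoroughly):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef uint8_t pixel;

    /* declaration from pixel.h above; link against the assembled pixel-util8.asm */
    void x265_pixel_sub_ps_16x16_avx2(int16_t *a, intptr_t dstride,
                                      const pixel *b0, const pixel *b1,
                                      intptr_t sstride0, intptr_t sstride1);

    static void sub_ps_ref(int16_t *a, intptr_t dstride,
                           const pixel *b0, const pixel *b1,
                           intptr_t s0, intptr_t s1, int size)
    {
        for (int y = 0; y < size; y++, a += dstride, b0 += s0, b1 += s1)
            for (int x = 0; x < size; x++)
                a[x] = (int16_t)(b0[x] - b1[x]);
    }

    int main(void)
    {
        pixel src0[16 * 16], src1[16 * 16];
        int16_t ref[16 * 16], opt[16 * 16];

        for (int i = 0; i < 16 * 16; i++)
        {
            src0[i] = (pixel)rand();
            src1[i] = (pixel)rand();
        }
        sub_ps_ref(ref, 16, src0, src1, 16, 16, 16);
        x265_pixel_sub_ps_16x16_avx2(opt, 16, src0, src1, 16, 16);
        puts(memcmp(ref, opt, sizeof(ref)) ? "MISMATCH" : "ok");
        return 0;
    }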