[x265] [PATCH] avx2: 'integral12h' asm code -> 2.42x faster than 'C' version

Ashok Kumar Mishra ashok at multicorewareinc.com
Thu Jun 15 15:08:40 CEST 2017


pushed

On Wed, Jun 14, 2017 at 4:07 PM, <jayashri at multicorewareinc.com> wrote:

> # HG changeset patch
> # User Jayashri Murugan <jayashri at multicorewareinc.com>
> # Date 1496834769 -19800
> #      Wed Jun 07 16:56:09 2017 +0530
> # Node ID 6d1fea9c34c5a1b7cb062ce3126df7de50170419
> # Parent  324ee113f48943791ec295013012d30fcbe16338
> avx2: 'integral12h' asm code -> 2.42x faster than 'C' version
>
>                                            speedup  avx2 cycles     c cycles
>     BIT_DEPTH = 8     : integral_init12h  2.42x    807.20          1950.56
>     BIT_DEPTH = 10|12 : integral_init12h  1.55x    1260.02         1958.27
>
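For reference, the scalar primitive this kernel replaces computes, for each of
the stride - 12 output columns, the sum of 12 consecutive pixels added to the
corresponding entry of the previous row of sums. Below is a minimal sketch of
that behaviour, not the exact x265 source: the name integral_init12h_ref, the
sliding-window form and the pixel typedef are illustrative assumptions.

    #include <stdint.h>

    typedef uint8_t pixel;   /* uint16_t when HIGH_BIT_DEPTH is enabled */

    /* Horizontal pass of a 12-wide integral: sum[x] gets the previous row's
     * value plus pix[x] + pix[x+1] + ... + pix[x+11]. */
    static void integral_init12h_ref(uint32_t *sum, const pixel *pix, intptr_t stride)
    {
        uint32_t v = 0;
        for (int i = 0; i < 12; i++)
            v += pix[i];
        for (intptr_t x = 0; x < stride - 12; x++)
        {
            sum[x] = v + sum[x - stride];   /* add the row above */
            v += pix[x + 12] - pix[x];      /* slide the 12-pixel window right */
        }
    }

The AVX2 kernel below takes a different route: it recomputes the 12-pixel sum
for every output position with pmovzxbw/pmovzxwd and paddw/paddd, which lets
it emit 16 sums per iteration for 8-bit input (8 per iteration for
HIGH_BIT_DEPTH) before adding the previous row and storing.
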
> diff -r 324ee113f489 -r 6d1fea9c34c5 source/common/x86/asm-primitives.cpp
> --- a/source/common/x86/asm-primitives.cpp      Wed Jun 07 15:37:49 2017 +0530
> +++ b/source/common/x86/asm-primitives.cpp      Wed Jun 07 16:56:09 2017 +0530
> @@ -2166,6 +2166,7 @@
>          p.integral_initv[INTEGRAL_32] = PFX(integral32v_avx2);
>          p.integral_inith[INTEGRAL_4] = PFX(integral4h_avx2);
>          p.integral_inith[INTEGRAL_8] = PFX(integral8h_avx2);
> +        p.integral_inith[INTEGRAL_12] = PFX(integral12h_avx2);
>
>          /* TODO: This kernel needs to be modified to work with HIGH_BIT_DEPTH only
>          p.planeClipAndMax = PFX(planeClipAndMax_avx2); */
> @@ -3713,6 +3714,7 @@
>          p.integral_initv[INTEGRAL_32] = PFX(integral32v_avx2);
>          p.integral_inith[INTEGRAL_4] = PFX(integral4h_avx2);
>          p.integral_inith[INTEGRAL_8] = PFX(integral8h_avx2);
> +        p.integral_inith[INTEGRAL_12] = PFX(integral12h_avx2);
>
>      }
>  #endif
> diff -r 324ee113f489 -r 6d1fea9c34c5 source/common/x86/seaintegral.asm
> --- a/source/common/x86/seaintegral.asm Wed Jun 07 15:37:49 2017 +0530
> +++ b/source/common/x86/seaintegral.asm Wed Jun 07 16:56:09 2017 +0530
> @@ -389,14 +389,204 @@
>      RET
>  %endif
>
> +%macro INTEGRAL_TWELVE_HORIZONTAL_16 0          ;sum pix[x..x+11] for 16 columns, 8-bit input, 16-bit word sums in m0
> +    pmovzxbw       m0, [r1]
> +    pmovzxbw       m1, [r1 + 1]
> +    paddw          m0, m1
> +    pmovzxbw       m1, [r1 + 2]
> +    paddw          m0, m1
> +    pmovzxbw       m1, [r1 + 3]
> +    paddw          m0, m1
> +    pmovzxbw       m1, [r1 + 4]
> +    paddw          m0, m1
> +    pmovzxbw       m1, [r1 + 5]
> +    paddw          m0, m1
> +    pmovzxbw       m1, [r1 + 6]
> +    paddw          m0, m1
> +    pmovzxbw       m1, [r1 + 7]
> +    paddw          m0, m1
> +    pmovzxbw       m1, [r1 + 8]
> +    paddw          m0, m1
> +    pmovzxbw       m1, [r1 + 9]
> +    paddw          m0, m1
> +    pmovzxbw       m1, [r1 + 10]
> +    paddw          m0, m1
> +    pmovzxbw       m1, [r1 + 11]
> +    paddw          m0, m1
> +%endmacro
> +
> +%macro INTEGRAL_TWELVE_HORIZONTAL_4 0           ;sum pix[x..x+11] for 4 columns, 8-bit input, 16-bit word sums in xm0
> +    movd           xm0, [r1]
> +    movd           xm1, [r1 + 1]
> +    pmovzxbw       xm0, xm0
> +    pmovzxbw       xm1, xm1
> +    paddw          xm0, xm1
> +    movd           xm1, [r1 + 2]
> +    pmovzxbw       xm1, xm1
> +    paddw          xm0, xm1
> +    movd           xm1, [r1 + 3]
> +    pmovzxbw       xm1, xm1
> +    paddw          xm0, xm1
> +    movd           xm1, [r1 + 4]
> +    pmovzxbw       xm1, xm1
> +    paddw          xm0, xm1
> +    movd           xm1, [r1 + 5]
> +    pmovzxbw       xm1, xm1
> +    paddw          xm0, xm1
> +    movd           xm1, [r1 + 6]
> +    pmovzxbw       xm1, xm1
> +    paddw          xm0, xm1
> +    movd           xm1, [r1 + 7]
> +    pmovzxbw       xm1, xm1
> +    paddw          xm0, xm1
> +    movd           xm1, [r1 + 8]
> +    pmovzxbw       xm1, xm1
> +    paddw          xm0, xm1
> +    movd           xm1, [r1 + 9]
> +    pmovzxbw       xm1, xm1
> +    paddw          xm0, xm1
> +    movd           xm1, [r1 + 10]
> +    pmovzxbw       xm1, xm1
> +    paddw          xm0, xm1
> +    movd           xm1, [r1 + 11]
> +    pmovzxbw       xm1, xm1
> +    paddw          xm0, xm1
> +%endmacro
> +
> +%macro INTEGRAL_TWELVE_HORIZONTAL_8_HBD 0       ;sum pix[x..x+11] for 8 columns, 16-bit HIGH_BIT_DEPTH input, 32-bit sums in m0
> +    pmovzxwd       m0, [r1]
> +    pmovzxwd       m1, [r1 + 2]
> +    paddd          m0, m1
> +    pmovzxwd       m1, [r1 + 4]
> +    paddd          m0, m1
> +    pmovzxwd       m1, [r1 + 6]
> +    paddd          m0, m1
> +    pmovzxwd       m1, [r1 + 8]
> +    paddd          m0, m1
> +    pmovzxwd       m1, [r1 + 10]
> +    paddd          m0, m1
> +    pmovzxwd       m1, [r1 + 12]
> +    paddd          m0, m1
> +    pmovzxwd       m1, [r1 + 14]
> +    paddd          m0, m1
> +    pmovzxwd       m1, [r1 + 16]
> +    paddd          m0, m1
> +    pmovzxwd       m1, [r1 + 18]
> +    paddd          m0, m1
> +    pmovzxwd       m1, [r1 + 20]
> +    paddd          m0, m1
> +    pmovzxwd       m1, [r1 + 22]
> +    paddd          m0, m1
> +%endmacro
> +
> +%macro INTEGRAL_TWELVE_HORIZONTAL_4_HBD 0       ;sum pix[x..x+11] for 4 columns, 16-bit HIGH_BIT_DEPTH input, 32-bit sums in xm0
> +    pmovzxwd       xm0, [r1]
> +    pmovzxwd       xm1, [r1 + 2]
> +    paddd          xm0, xm1
> +    pmovzxwd       xm1, [r1 + 4]
> +    paddd          xm0, xm1
> +    pmovzxwd       xm1, [r1 + 6]
> +    paddd          xm0, xm1
> +    pmovzxwd       xm1, [r1 + 8]
> +    paddd          xm0, xm1
> +    pmovzxwd       xm1, [r1 + 10]
> +    paddd          xm0, xm1
> +    pmovzxwd       xm1, [r1 + 12]
> +    paddd          xm0, xm1
> +    pmovzxwd       xm1, [r1 + 14]
> +    paddd          xm0, xm1
> +    pmovzxwd       xm1, [r1 + 16]
> +    paddd          xm0, xm1
> +    pmovzxwd       xm1, [r1 + 18]
> +    paddd          xm0, xm1
> +    pmovzxwd       xm1, [r1 + 20]
> +    paddd          xm0, xm1
> +    pmovzxwd       xm1, [r1 + 22]
> +    paddd          xm0, xm1
> +%endmacro
> +
>  ;-----------------------------------------------------------------------------
>  ;static void integral_init12h_c(uint32_t *sum, pixel *pix, intptr_t stride)
>  ;-----------------------------------------------------------------------------
>  INIT_YMM avx2
> -cglobal integral12h, 3, 3, 0
> -
> +%if HIGH_BIT_DEPTH
> +cglobal integral12h, 3, 5, 3
> +    lea            r3, [4 * r2]                ;r3 = 4 * stride, byte distance between rows of 32-bit sums
> +    sub            r0, r3                      ;step back to the previous row; results go to [r0 + r3]
> +    sub            r2, 12                      ;stride - 12
> +    mov            r4, r2
> +    shr            r4, 3                       ;(stride - 12) / 8 iterations of 8 sums each
> +
> +.loop:
> +    INTEGRAL_TWELVE_HORIZONTAL_8_HBD
> +    movu           m1, [r0]                    ;previous row sums
> +    paddd          m0, m1
> +    movu           [r0 + r3], m0               ;store current row sums
> +    add            r1, 16
> +    add            r0, 32
> +    sub            r2, 8
> +    sub            r4, 1
> +    jnz            .loop
> +    INTEGRAL_TWELVE_HORIZONTAL_4_HBD           ;remaining 4 columns
> +    movu           xm1, [r0]
> +    paddd          xm0, xm1
> +    movu           [r0 + r3], xm0
>      RET
>
> +%else
> +cglobal integral12h, 3, 5, 3
> +    lea            r3, [4 * r2]                ;r3 = 4 * stride, byte distance between rows of 32-bit sums
> +    sub            r0, r3                      ;step back to the previous row; results go to [r0 + r3]
> +    sub            r2, 12                      ;stride - 12
> +    mov            r4, r2
> +    shr            r4, 4                       ;(stride - 12) / 16 iterations of 16 sums each
> +
> +.loop_16:
> +    INTEGRAL_TWELVE_HORIZONTAL_16
> +    vperm2i128     m2, m0, m0, 1               ;move word sums 8-15 into the low lane of m2
> +    pmovzxwd       m2, xm2                     ;widen sums 8-15 to 32 bits
> +    pmovzxwd       m0, xm0                     ;widen sums 0-7 to 32 bits
> +    movu           m1, [r0]
> +    paddd          m0, m1
> +    movu           [r0 + r3], m0
> +    movu           m1, [r0 + 32]
> +    paddd          m2, m1
> +    movu           [r0 + r3 + 32], m2
> +    add            r1, 16
> +    add            r0, 64
> +    sub            r2, 16
> +    sub            r4, 1
> +    jnz            .loop_16
> +    cmp            r2, 12                      ;(stride - 12) % 16 columns remain: 12 or 4
> +    je             .loop_12
> +    cmp            r2, 4
> +    je             .loop_4
> +
> +.loop_12:                                      ;tail: 12 remaining columns
> +    INTEGRAL_TWELVE_HORIZONTAL_16
> +    vperm2i128     m2, m0, m0, 1
> +    pmovzxwd       xm2, xm2
> +    pmovzxwd       m0, xm0
> +    movu           m1, [r0]
> +    paddd          m0, m1
> +    movu           [r0 + r3], m0
> +    movu           xm1, [r0 + 32]
> +    paddd          xm2, xm1
> +    movu           [r0 + r3 + 32], xm2
> +    jmp             .end
> +
> +.loop_4:                                       ;tail: 4 remaining columns
> +    INTEGRAL_TWELVE_HORIZONTAL_4
> +    pmovzxwd       xm0, xm0
> +    movu           xm1, [r0]
> +    paddd          xm0, xm1
> +    movu           [r0 + r3], xm0
> +    jmp            .end
> +
> +.end:
> +    RET
> +%endif
> +
>  ;-----------------------------------------------------------------------------
>  ;static void integral_init16h_c(uint32_t *sum, pixel *pix, intptr_t stride)
>  ;-----------------------------------------------------------------------------

