[x265] [PATCH 2 of 3] asm: intra_pred_ang32_25 improved by ~53% over SSE4

Praveen Tiwari praveen at multicorewareinc.com
Fri Mar 27 06:30:48 CET 2015


Please ignore the duplicate patch (the second one); it was sent by mistake.

Regards,
Praveen

On Fri, Mar 27, 2015 at 10:41 AM, <praveen at multicorewareinc.com> wrote:

> # HG changeset patch
> # User Praveen Tiwari <praveen at multicorewareinc.com>
> # Date 1427360000 -19800
> #      Thu Mar 26 14:23:20 2015 +0530
> # Branch stable
> # Node ID 39c139322fde1f8c62545fd8bbed9cc8198e540c
> # Parent  24bdb3e594556ca6e12ee9dae58100a6bd115d2a
> asm: intra_pred_ang32_25 improved by ~53% over SSE4
>
> AVX2:
> intra_ang_32x32[25]     23.11x   1293.83         29904.12
>
> SSE4:
> intra_ang_32x32[25]     10.31x   2759.33         28451.26
>
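> (For reference: the testbench columns are speedup over the C
> primitive, average cycles for the assembly version, and average
> cycles for the C reference. The ~53% in the subject line is the
> cycle reduction relative to SSE4: (2759.33 - 1293.83) / 2759.33 is
> roughly 53%, i.e. the AVX2 code is about 2.1x faster than the SSE4
> code.)
>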
> diff -r 24bdb3e59455 -r 39c139322fde source/common/x86/asm-primitives.cpp
> --- a/source/common/x86/asm-primitives.cpp      Thu Mar 26 13:20:04 2015 +0530
> +++ b/source/common/x86/asm-primitives.cpp      Thu Mar 26 14:23:20 2015 +0530
> @@ -1643,6 +1643,7 @@
>          p.cu[BLOCK_32x32].intra_pred[31] = x265_intra_pred_ang32_31_avx2;
>          p.cu[BLOCK_32x32].intra_pred[32] = x265_intra_pred_ang32_32_avx2;
>          p.cu[BLOCK_32x32].intra_pred[33] = x265_intra_pred_ang32_33_avx2;
> +        p.cu[BLOCK_32x32].intra_pred[25] = x265_intra_pred_ang32_25_avx2;
>
>          // copy_sp primitives
>          p.cu[BLOCK_16x16].copy_sp = x265_blockcopy_sp_16x16_avx2;
> diff -r 24bdb3e59455 -r 39c139322fde source/common/x86/intrapred.h
> --- a/source/common/x86/intrapred.h     Thu Mar 26 13:20:04 2015 +0530
> +++ b/source/common/x86/intrapred.h     Thu Mar 26 14:23:20 2015 +0530
> @@ -213,6 +213,7 @@
>  void x265_intra_pred_ang32_31_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
>  void x265_intra_pred_ang32_32_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
>  void x265_intra_pred_ang32_33_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
> +void x265_intra_pred_ang32_25_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
>  void x265_all_angs_pred_4x4_sse4(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma);
>  void x265_all_angs_pred_8x8_sse4(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma);
>  void x265_all_angs_pred_16x16_sse4(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma);
> diff -r 24bdb3e59455 -r 39c139322fde source/common/x86/intrapred8.asm
> --- a/source/common/x86/intrapred8.asm  Thu Mar 26 13:20:04 2015 +0530
> +++ b/source/common/x86/intrapred8.asm  Thu Mar 26 14:23:20 2015 +0530
> @@ -407,6 +407,26 @@
>                     db 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0
>
>
> +
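> +; Weight pairs for the HEVC angular filter, two rows per 32-byte entry:
> +;     dst[x] = ((32 - fract) * ref[x] + fract * ref[x + 1] + 16) >> 5
> +; For mode 25 (intraPredAngle = -2) the fraction for row y is
> +; ((y + 1) * -2) & 31, so the (32 - fract, fract) pairs run
> +; (2, 30), (4, 28), ..., (32, 0) and repeat after 16 rows; they are
> +; applied pairwise (pmaddubsw) by the INTRA_PRED_ANG32_CAL_ROW macro.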
> +ALIGN 32
> +c_ang32_mode_25:   db 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28
> +                   db 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24
> +                   db 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20
> +                   db 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16
> +                   db 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12
> +                   db 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8
> +                   db 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4
> +                   db 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0
> +                   db 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28
> +                   db 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24
> +                   db 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20
> +                   db 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16
> +                   db 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12
> +                   db 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8
> +                   db 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4
> +                   db 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0
> +
> +
>  ALIGN 32
>  ;; (blkSize - 1 - x)
>  pw_planar4_0:         dw 3,  2,  1,  0,  3,  2,  1,  0
> @@ -14108,5 +14128,155 @@
>      vpermq            m6, m6, 11011000b
>      movu              [r0 + r3], m6
>      RET
> +
> +INIT_YMM avx2
> +cglobal intra_pred_ang32_25, 3, 5, 11
> +    mova              m0, [pw_1024]
> +    mova              m1, [intra_pred_shuff_0_8]
> +    lea               r3, [3 * r1]
> +    lea               r4, [c_ang32_mode_25]
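> +    ; r0 = dst, r1 = dstStride, r2 = srcPix; m0 = pw_1024 rounding
> +    ; constant (pmulhrsw by 1024 computes (x + 16) >> 5), m1 = mask
> +    ; interleaving adjacent reference bytes, r3 = 3 * dstStride,
> +    ; r4 = pointer to the per-row weight pairs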
> +
> +    ;row [0, 1]
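> +    ; broadcast and interleave the reference bytes (ref[i], ref[i+1])
> +    ; for all 32 columns; INTRA_PRED_ANG32_CAL_ROW then weights them
> +    ; with m10 and returns two finished 32-pixel rows in m7 and m6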
> +    vbroadcasti128    m2, [r2 + 0]
> +    pshufb            m2, m1
> +    vbroadcasti128    m3, [r2 + 8]
> +    pshufb            m3, m1
> +    vbroadcasti128    m4, [r2 + 16]
> +    pshufb            m4, m1
> +    vbroadcasti128    m5, [r2 + 24]
> +    pshufb            m5, m1
> +
> +    mova              m10, [r4 + 0 * mmsize]
> +
> +    INTRA_PRED_ANG32_CAL_ROW
> +    movu              [r0], m7
> +    movu              [r0 + r1], m6
> +
> +    ;row[2, 3]
> +    mova              m10, [r4 + 1 * mmsize]
> +
> +    INTRA_PRED_ANG32_CAL_ROW
> +    movu              [r0 + 2 * r1], m7
> +    movu              [r0 + r3], m6
> +
> +    ;row[4, 5]
> +    mova              m10, [r4 + 2 * mmsize]
> +    lea               r0, [r0 + 4 * r1]
> +
> +    INTRA_PRED_ANG32_CAL_ROW
> +    movu              [r0], m7
> +    movu              [r0 + r1], m6
> +
> +    ;row[6, 7]
> +    mova              m10, [r4 + 3 * mmsize]
> +
> +    INTRA_PRED_ANG32_CAL_ROW
> +    movu              [r0 + 2 * r1], m7
> +    movu              [r0 + r3], m6
> +
> +    ;row[8, 9]
> +    add               r4, 4 * mmsize
> +    lea               r0, [r0 + 4 * r1]
> +    mova              m10, [r4 + 0 * mmsize]
> +
> +    INTRA_PRED_ANG32_CAL_ROW
> +    movu              [r0], m7
> +    movu              [r0 + r1], m6
> +
> +    ;row[10, 11]
> +    mova              m10, [r4 + 1 * mmsize]
> +
> +    INTRA_PRED_ANG32_CAL_ROW
> +    movu              [r0 + 2 * r1], m7
> +    movu              [r0 + r3], m6
> +
> +    ;row[12, 13]
> +    mova              m10, [r4 + 2 * mmsize]
> +    lea               r0, [r0 + 4 * r1]
> +
> +    INTRA_PRED_ANG32_CAL_ROW
> +    movu              [r0], m7
> +    movu              [r0 + r1], m6
> +
> +    ;row[14, 15]
> +    mova              m10, [r4 + 3 * mmsize]
> +
> +    INTRA_PRED_ANG32_CAL_ROW
> +    movu              [r0 + 2 * r1], m7
> +    movu              [r0 + r3], m6
> +
> +    ;row[16, 17]
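> +    ; from row 16 on deltaInt drops from -1 to -2, so reload the
> +    ; reference window one byte earlier ([r2 - 1]) and fill the first
> +    ; slot with the left-column sample selected by the inverse-angle
> +    ; projection (pinsrb from [r2 + 80])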
> +    movu              xm2, [r2 - 1]
> +    pinsrb            xm2, [r2 + 80], 0
> +    vinserti128       m2, m2, xm2, 1
> +    pshufb            m2, m1
> +    vbroadcasti128    m3, [r2 + 7]
> +    pshufb            m3, m1
> +    vbroadcasti128    m4, [r2 + 15]
> +    pshufb            m4, m1
> +    vbroadcasti128    m5, [r2 + 23]
> +    pshufb            m5, m1
> +
> +    add               r4, 4 * mmsize
> +    lea               r0, [r0 + 4 * r1]
> +    mova              m10, [r4 + 0 * mmsize]
> +
> +    INTRA_PRED_ANG32_CAL_ROW
> +    movu              [r0], m7
> +    movu              [r0 + r1], m6
> +
> +    ;row[18, 19]
> +    mova              m10, [r4 + 1 * mmsize]
> +
> +    INTRA_PRED_ANG32_CAL_ROW
> +    movu              [r0 + 2 * r1], m7
> +    movu              [r0 + r3], m6
> +
> +    ;row[20, 21]
> +    mova              m10, [r4 + 2 * mmsize]
> +    lea               r0, [r0 + 4 * r1]
> +
> +    INTRA_PRED_ANG32_CAL_ROW
> +    movu              [r0], m7
> +    movu              [r0 + r1], m6
> +
> +    ;row[22, 23]
> +    mova              m10, [r4 + 3 * mmsize]
> +
> +    INTRA_PRED_ANG32_CAL_ROW
> +    movu              [r0 + 2 * r1], m7
> +    movu              [r0 + r3], m6
> +
> +    ;row[24, 25]
> +    add               r4, 4 * mmsize
> +    lea               r0, [r0 + 4 * r1]
> +    mova              m10, [r4 + 0 * mmsize]
> +
> +    INTRA_PRED_ANG32_CAL_ROW
> +    movu              [r0], m7
> +    movu              [r0 + r1], m6
> +
> +    ;row[26, 27]
> +    mova              m10, [r4 + 1 * mmsize]
> +
> +    INTRA_PRED_ANG32_CAL_ROW
> +    movu              [r0 + 2 * r1], m7
> +    movu              [r0 + r3], m6
> +
> +    ;row[28, 29]
> +    mova              m10, [r4 + 2 * mmsize]
> +    lea               r0, [r0 + 4 * r1]
> +
> +    INTRA_PRED_ANG32_CAL_ROW
> +    movu              [r0], m7
> +    movu              [r0 + r1], m6
> +
> +    ;row[30, 31]
> +    mova              m10, [r4 + 3 * mmsize]
> +
> +    INTRA_PRED_ANG32_CAL_ROW
> +    movu              [r0 + 2 * r1], m7
> +    movu              [r0 + r3], m6
> +    RET
>  %endif
>
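
For anyone cross-checking the assembly against the spec: the computation
being vectorized is the standard HEVC angular interpolation. Below is a
minimal scalar sketch in C++. It assumes x265's neighbour layout (top-left
sample at srcPix[0], the 64 above samples at srcPix[1..64], the 64 left
samples at srcPix[65..128]); the function name is illustrative, not part
of the patch:

    #include <cstdint>

    typedef uint8_t pixel;

    // Scalar model of intra prediction mode 25 (intraPredAngle = -2) for a
    // 32x32 block, following the HEVC angular-prediction formula.
    static void intra_pred_ang32_25_ref(pixel* dst, intptr_t dstStride,
                                        const pixel* srcPix)
    {
        const int angle    = -2;     // intraPredAngle for mode 25
        const int invAngle = -4096;  // inverse angle for projecting left samples

        // refMain[0..64] holds the top-left sample plus the above row; for
        // this shallow negative angle only refMain[-1] is ever reached, and
        // it is projected from the left column (the pinsrb from [r2 + 80]).
        pixel buf[66];
        pixel* refMain = buf + 1;
        for (int x = 0; x <= 64; x++)
            refMain[x] = srcPix[x];
        refMain[-1] = srcPix[64 + ((-1 * invAngle + 128) >> 8)];  // srcPix[80]

        for (int y = 0; y < 32; y++)
        {
            int deltaPos   = (y + 1) * angle;
            int deltaInt   = deltaPos >> 5;   // arithmetic shift: floor(/32)
            int deltaFract = deltaPos & 31;   // the table's per-row fraction
            for (int x = 0; x < 32; x++)
            {
                int i = x + deltaInt + 1;
                dst[y * dstStride + x] =
                    (pixel)(((32 - deltaFract) * refMain[i]
                             + deltaFract * refMain[i + 1] + 16) >> 5);
            }
        }
    }

The assembly computes the same weighted sums 32 pixels at a time:
pmaddubsw applies a (32 - deltaFract, deltaFract) byte pair to each
interleaved reference pair, and pmulhrsw against pw_1024 implements the
(+ 16) >> 5 rounding.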
>