[x265] [PATCH] asm-intra_pred_ang16_25: improved, 781.13c -> 466.16c
chen
chenm003 at 163.com
Fri Mar 13 02:59:09 CET 2015
The code is right.
Next time, advancing r4 by 4*mmsize partway through would be better; any displacement outside [-128,127] is encoded as a 4-byte address offset, which makes the instructions larger.
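(For illustration only, a sketch of what that suggestion could look like with the same macros as the patch below; this is not the committed change. The table offsets reach 4*mmsize = 128 bytes at the third call, which no longer fits a signed 8-bit displacement, so rebasing r4 keeps every remaining offset within [0, 3*mmsize]:)

    INTRA_PRED_ANG16_25 0           ; offsets 0*mmsize, 1*mmsize

    lea     r0, [r0 + 4 * r1]
    INTRA_PRED_ANG16_25 2           ; offsets up to 3*mmsize = 96, still 1-byte disp

    add     r4, 4 * mmsize          ; rebase coefficient pointer here
    lea     r0, [r0 + 4 * r1]
    INTRA_PRED_ANG16_25 0           ; was 4: those offsets would need 4-byte disp

    lea     r0, [r0 + 4 * r1]
    INTRA_PRED_ANG16_25 2           ; was 6
    RET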
At 2015-03-12 21:41:23, praveen at multicorewareinc.com wrote:
># HG changeset patch
># User Praveen Tiwari <praveen at multicorewareinc.com>
># Date 1426167514 -19800
># Node ID a5648b191717022f24b48b30b6f184d2c46f91f7
># Parent d012e125bdb1299ba29b9c0680931e148981a42e
>asm-intra_pred_ang16_25: improved, 781.13c -> 466.16c
>
>AVX2:
>intra_ang_16x16[25] 18.09x 466.16 8434.26
>
>SSE4:
>intra_ang_16x16[25] 10.90x 781.13 8511.65
>
>diff -r d012e125bdb1 -r a5648b191717 source/common/x86/asm-primitives.cpp
>--- a/source/common/x86/asm-primitives.cpp Thu Mar 12 18:40:23 2015 +0530
>+++ b/source/common/x86/asm-primitives.cpp Thu Mar 12 19:08:34 2015 +0530
>@@ -1504,6 +1504,7 @@
> p.cu[BLOCK_8x8].intra_pred[12] = x265_intra_pred_ang8_12_avx2;
> p.cu[BLOCK_8x8].intra_pred[24] = x265_intra_pred_ang8_24_avx2;
> p.cu[BLOCK_8x8].intra_pred[11] = x265_intra_pred_ang8_11_avx2;
>+ p.cu[BLOCK_16x16].intra_pred[25] = x265_intra_pred_ang16_25_avx2;
>
> // copy_sp primitives
> p.cu[BLOCK_16x16].copy_sp = x265_blockcopy_sp_16x16_avx2;
>diff -r d012e125bdb1 -r a5648b191717 source/common/x86/intrapred.h
>--- a/source/common/x86/intrapred.h Thu Mar 12 18:40:23 2015 +0530
>+++ b/source/common/x86/intrapred.h Thu Mar 12 19:08:34 2015 +0530
>@@ -182,6 +182,7 @@
> void x265_intra_pred_ang8_12_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
> void x265_intra_pred_ang8_24_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
> void x265_intra_pred_ang8_11_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
>+void x265_intra_pred_ang16_25_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
> void x265_all_angs_pred_4x4_sse4(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma);
> void x265_all_angs_pred_8x8_sse4(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma);
> void x265_all_angs_pred_16x16_sse4(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma);
>diff -r d012e125bdb1 -r a5648b191717 source/common/x86/intrapred8.asm
>--- a/source/common/x86/intrapred8.asm Thu Mar 12 18:40:23 2015 +0530
>+++ b/source/common/x86/intrapred8.asm Thu Mar 12 19:08:34 2015 +0530
>@@ -113,6 +113,17 @@
> db 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2
> db 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24
>
>+ALIGN 32
>+c_ang16_mode_25: db 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28
>+ db 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24
>+ db 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20
>+ db 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16
>+ db 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12
>+ db 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8
>+ db 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4
>+ db 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0
>+
>+ALIGN 32
> ;; (blkSize - 1 - x)
> pw_planar4_0: dw 3, 2, 1, 0, 3, 2, 1, 0
> pw_planar4_1: dw 3, 3, 3, 3, 3, 3, 3, 3
>@@ -10453,3 +10464,43 @@
> movhps [r0 + 2 * r1], xm4
> movhps [r0 + r3], xm2
> RET
>+
>+%macro INTRA_PRED_ANG16_MC0 3
>+ pmaddubsw m3, m1, [r4 + %3 * mmsize]
>+ pmulhrsw m3, m0
>+ pmaddubsw m4, m2, [r4 + %3 * mmsize]
>+ pmulhrsw m4, m0
>+ packuswb m3, m4
>+ movu [%1], xm3
>+ vextracti128 xm4, m3, 1
>+ movu [%2], xm4
>+%endmacro
>+
>+%macro INTRA_PRED_ANG16_25 1
>+ INTRA_PRED_ANG16_MC0 r0, r0 + r1, %1
>+ INTRA_PRED_ANG16_MC0 r0 + 2 * r1, r0 + r3, (%1 + 1)
>+%endmacro
>+
>+INIT_YMM avx2
>+cglobal intra_pred_ang16_25, 3, 5, 5
>+ mova m0, [pw_1024]
>+
>+ vbroadcasti128 m1, [r2]
>+ pshufb m1, [intra_pred_shuff_0_8]
>+ vbroadcasti128 m2, [r2 + 8]
>+ pshufb m2, [intra_pred_shuff_0_8]
>+
>+ lea r3, [3 * r1]
>+ lea r4, [c_ang16_mode_25]
>+
>+ INTRA_PRED_ANG16_25 0
>+
>+ lea r0, [r0 + 4 * r1]
>+ INTRA_PRED_ANG16_25 2
>+
>+ lea r0, [r0 + 4 * r1]
>+ INTRA_PRED_ANG16_25 4
>+
>+ lea r0, [r0 + 4 * r1]
>+ INTRA_PRED_ANG16_25 6
>+ RET