[x265] [PATCH] Luma_hpp[64x64 , 64x48 , 64x32 , 64x16] avx2 asm code : improved 33137c->24179 , 24826c->17629 , 16726c->11766 , 7830c->5820c
chen
chenm003 at 163.com
Mon Nov 17 17:59:50 CET 2014
The code is correct, but you need to fix the code style, e.g. convert tabs to spaces.
Since you have now covered all of the widths, we can optimize the algorithm to reduce these vpermq and pshufd instructions and improve performance.
At 2014-11-17 19:30:54,aasaipriya at multicorewareinc.com wrote:
># HG changeset patch
># User Aasaipriya Chandran <aasaipriya at multicorewareinc.com>
># Date 1416223817 -19800
># Mon Nov 17 17:00:17 2014 +0530
># Node ID d225eeaecba038fe09753c6e9a7081a8611b38a8
># Parent 77ff0a5f11d15369ce1c624a507dc78e25cd43fb
>Luma_hpp[64x64 , 64x48 , 64x32 , 64x16] avx2 asm code : improved 33137c->24179 , 24826c->17629 , 16726c->11766 , 7830c->5820c
>
>diff -r 77ff0a5f11d1 -r d225eeaecba0 source/common/x86/asm-primitives.cpp
>--- a/source/common/x86/asm-primitives.cpp Fri Nov 14 16:20:05 2014 +0530
>+++ b/source/common/x86/asm-primitives.cpp Mon Nov 17 17:00:17 2014 +0530
>@@ -1809,6 +1809,11 @@
> p.luma_hpp[LUMA_32x16] = x265_interp_8tap_horiz_pp_32x16_avx2;
> p.luma_hpp[LUMA_32x64] = x265_interp_8tap_horiz_pp_32x64_avx2;
>
>+ p.luma_hpp[LUMA_64x64] = x265_interp_8tap_horiz_pp_64x64_avx2;
>+ p.luma_hpp[LUMA_64x32] = x265_interp_8tap_horiz_pp_64x32_avx2;
>+ p.luma_hpp[LUMA_64x48] = x265_interp_8tap_horiz_pp_64x48_avx2;
>+ p.luma_hpp[LUMA_64x16] = x265_interp_8tap_horiz_pp_64x16_avx2;
>+
>
> p.luma_hpp[LUMA_64x64] = x265_interp_8tap_horiz_pp_64x64_avx2;
> p.luma_vpp[LUMA_4x4] = x265_interp_8tap_vert_pp_4x4_avx2;
>diff -r 77ff0a5f11d1 -r d225eeaecba0 source/common/x86/ipfilter8.asm
>--- a/source/common/x86/ipfilter8.asm Fri Nov 14 16:20:05 2014 +0530
>+++ b/source/common/x86/ipfilter8.asm Mon Nov 17 17:00:17 2014 +0530
>@@ -1035,6 +1035,117 @@
> RET
>
>
>+ %macro IPFILTER_LUMA_64x_avx2 2                 ; %1 = block width (64), %2 = block height
>+ INIT_YMM avx2
>+cglobal interp_8tap_horiz_pp_%1x%2, 4,6,8        ; void f(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
>+ sub r0, 3                                       ; src -= 3: 8-tap filter reads 3 pixels left of center
>+ mov r4d, r4m                                    ; r4d = coeffIdx
>+%ifdef PIC
>+ lea r5, [tab_LumaCoeff]
>+ vpbroadcastd m0, [r5 + r4 * 8]                  ; m0 = low 4 filter taps, broadcast to all dwords
>+ vpbroadcastd m1, [r5 + r4 * 8 + 4]              ; m1 = high 4 filter taps, broadcast to all dwords
>+%else
>+ vpbroadcastd m0, [tab_LumaCoeff + r4 * 8]
>+ vpbroadcastd m1, [tab_LumaCoeff + r4 * 8 + 4]
>+%endif
>+ mova m2, [tab_Tm]                               ; byte shuffle mask feeding the low-tap pmaddubsw
>+ movu m3, [tab_Tm + 16]                          ; byte shuffle mask feeding the high-tap pmaddubsw
>+ vpbroadcastd m7, [pw_1]                         ; words of 1: pmaddwd with m7 = pairwise word->dword add
>+
>+ ; register map
>+ ; m0 , m1 interpolate coeff
>+ ; m2 , m3 shuffle order table
>+ ; m7 - pw_1
>+
>+ mov r4d, %2                                     ; r4d = remaining row count (height)
>+.loop:
>+ ; Row 0
>+ ; ---- dst pixels 0-15 ----
>+ vbroadcasti128 m4, [r0] ; [x E D C B A 9 8 7 6 5 4 3 2 1 0]
>+ pshufb m5, m4, m3                               ; gather source bytes for the high 4 taps
>+ pshufb m4, m2                                   ; gather source bytes for the low 4 taps
>+ pmaddubsw m4, m0                                ; u8*s8 multiply-add with low taps
>+ pmaddubsw m5, m1                                ; u8*s8 multiply-add with high taps
>+ paddw m4, m5                                    ; combine low/high partial sums (words)
>+ pmaddwd m4, m7                                  ; horizontal add of adjacent words -> dwords
>+ vbroadcasti128 m5, [r0 + 8]                     ; next 8 source pixels
>+ pshufb m6, m5, m3
>+ pshufb m5, m2
>+ pmaddubsw m5, m0
>+ pmaddubsw m6, m1
>+ paddw m5, m6
>+ pmaddwd m5, m7
>+ packssdw m4, m5 ; [17 16 15 14 07 06 05 04 13 12 11 10 03 02 01 00]
>+ pmulhrsw m4, [pw_512]                           ; rounded scale: pmulhrsw by 512 == (x + 32) >> 6
>+ packuswb m4, m4                                 ; saturate words to unsigned bytes
>+ vpermq m4, m4, 11011000b                        ; reorder 64-bit lanes back into pixel order
>+ pshufd xm4, xm4, 11011000b                      ; reorder 32-bit groups back into pixel order
>+ movu [r2],xm4                                   ; store dst pixels 0-15
>+ ; ---- dst pixels 16-31 (same pattern) ----
>+ vbroadcasti128 m4, [r0 + 16]
>+ pshufb m5, m4, m3
>+ pshufb m4, m2
>+ pmaddubsw m4, m0
>+ pmaddubsw m5, m1
>+ paddw m4, m5
>+ pmaddwd m4, m7
>+ vbroadcasti128 m5, [r0 + 24]
>+ pshufb m6, m5, m3
>+ pshufb m5, m2
>+ pmaddubsw m5, m0
>+ pmaddubsw m6, m1
>+ paddw m5, m6
>+ pmaddwd m5, m7
>+ packssdw m4, m5
>+ pmulhrsw m4, [pw_512]
>+ packuswb m4, m4
>+ vpermq m4, m4, 11011000b
>+ pshufd xm4, xm4, 11011000b
>+ movu [r2 + 16],xm4                              ; store dst pixels 16-31
>+ ; ---- dst pixels 32-47 (same pattern) ----
>+ vbroadcasti128 m4, [r0 + 32]
>+ pshufb m5, m4, m3
>+ pshufb m4, m2
>+ pmaddubsw m4, m0
>+ pmaddubsw m5, m1
>+ paddw m4, m5
>+ pmaddwd m4, m7
>+ vbroadcasti128 m5, [r0 + 40]
>+ pshufb m6, m5, m3
>+ pshufb m5, m2
>+ pmaddubsw m5, m0
>+ pmaddubsw m6, m1
>+ paddw m5, m6
>+ pmaddwd m5, m7
>+ packssdw m4, m5
>+ pmulhrsw m4, [pw_512]
>+ packuswb m4, m4
>+ vpermq m4, m4, 11011000b
>+ pshufd xm4, xm4, 11011000b
>+ movu [r2 + 32],xm4                              ; store dst pixels 32-47
>+ ; ---- dst pixels 48-63 (same pattern) ----
>+ vbroadcasti128 m4, [r0 + 48]
>+ pshufb m5, m4, m3
>+ pshufb m4, m2
>+ pmaddubsw m4, m0
>+ pmaddubsw m5, m1
>+ paddw m4, m5
>+ pmaddwd m4, m7
>+ vbroadcasti128 m5, [r0 + 56]
>+ pshufb m6, m5, m3
>+ pshufb m5, m2
>+ pmaddubsw m5, m0
>+ pmaddubsw m6, m1
>+ paddw m5, m6
>+ pmaddwd m5, m7
>+ packssdw m4, m5
>+ pmulhrsw m4, [pw_512]
>+ packuswb m4, m4
>+ vpermq m4, m4, 11011000b
>+ pshufd xm4, xm4, 11011000b
>+ movu [r2 + 48],xm4                              ; store dst pixels 48-63
>+ lea r0, [r0 + r1]                               ; src += srcStride
>+ lea r2, [r2 + r3]                               ; dst += dstStride
>+ dec r4d
>+ jnz .loop                                       ; next row
>+ RET
>+ %endmacro
>
>
> ;--------------------------------------------------------------------------------------------------------------
>@@ -1055,6 +1166,11 @@
> IPFILTER_LUMA_32x_avx2 32 , 16
> IPFILTER_LUMA_32x_avx2 32 , 64
>
>+ IPFILTER_LUMA_64x_avx2 64 , 64
>+ IPFILTER_LUMA_64x_avx2 64 , 48
>+ IPFILTER_LUMA_64x_avx2 64 , 32
>+ IPFILTER_LUMA_64x_avx2 64 , 16
>+
> ;--------------------------------------------------------------------------------------------------------------
> ; void interp_8tap_horiz_pp_%1x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
> ;--------------------------------------------------------------------------------------------------------------
>_______________________________________________
>x265-devel mailing list
>x265-devel at videolan.org
>https://mailman.videolan.org/listinfo/x265-devel
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://mailman.videolan.org/pipermail/x265-devel/attachments/20141118/f8a97399/attachment-0001.html>
More information about the x265-devel
mailing list