[x265] [PATCH] asm: luma_vpp[8x8] in avx2: improve 701c->491c
Divya Manivannan
divya at multicorewareinc.com
Fri Nov 14 03:57:34 CET 2014
Thanks, Chen. I will make the necessary changes.
Regards,
Divya
On Fri, Nov 14, 2014 at 3:14 AM, chen <chenm003 at 163.com> wrote:
>
> At 2014-11-13 19:35:53, divya at multicorewareinc.com wrote:
> ># HG changeset patch
> ># User Divya Manivannan
> ># Date 1415878496 -19800
> ># Thu Nov 13 17:04:56 2014 +0530
> ># Node ID 5f5dabe29fee0df8cf39c7c00255005885ccd77e
> ># Parent 18aefbde72ab2dfaa0d4edeea7fd0ab4f9a09f9c
> >asm: luma_vpp[8x8] in avx2: improve 701c->491c
> >
> >diff -r 18aefbde72ab -r 5f5dabe29fee source/common/x86/asm-primitives.cpp
> >--- a/source/common/x86/asm-primitives.cpp Wed Nov 12 17:17:56 2014 -0600
> >+++ b/source/common/x86/asm-primitives.cpp Thu Nov 13 17:04:56 2014 +0530
> >@@ -1800,6 +1800,7 @@
> > #endif
> > p.luma_hpp[LUMA_4x4] = x265_interp_8tap_horiz_pp_4x4_avx2;
> > p.luma_vpp[LUMA_4x4] = x265_interp_8tap_vert_pp_4x4_avx2;
> >+ p.luma_vpp[LUMA_8x8] = x265_interp_8tap_vert_pp_8x8_avx2;
>
> your code is for X64 only, but this section is compiled for both X86 and X64
>
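A minimal sketch of the guard being asked for here, assuming x265's existing X86_64 macro (the exact surrounding context in asm-primitives.cpp may differ):

    /* The assembly below is wrapped in ARCH_X86_64, so the symbol only
     * exists in 64-bit builds; assign it under the same condition to
     * avoid an undefined reference when linking an x86 build. */
    #if X86_64
        p.luma_vpp[LUMA_8x8] = x265_interp_8tap_vert_pp_8x8_avx2;
    #endif
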
> > }
> > #endif // if HIGH_BIT_DEPTH
> > }
> >diff -r 18aefbde72ab -r 5f5dabe29fee source/common/x86/ipfilter8.asm
> >--- a/source/common/x86/ipfilter8.asm Wed Nov 12 17:17:56 2014 -0600
> >+++ b/source/common/x86/ipfilter8.asm Thu Nov 13 17:04:56 2014 +0530
> >@@ -3601,6 +3601,92 @@
> > RET
> > %endmacro
> >
> >+INIT_YMM avx2
> >+%if ARCH_X86_64 == 1
> >+cglobal interp_8tap_vert_pp_8x8, 4,7,11
> >+ mov r4d, r4m
> >+ shl r4d, 6
> >+
> >+%ifdef PIC
> >+ lea r5, [tab_LumaCoeffVer]
> >+ lea r5, [r5 + r4]
> >+%else
> >+ lea r5, [tab_LumaCoeffVer + r4]
> >+%endif
> >+ vbroadcasti128 m0, [r5]
> >+ vbroadcasti128 m7, [r5 + 16]
> >+ vbroadcasti128 m8, [r5 + 32]
> >+ vbroadcasti128 m9, [r5 + 48]
> >+
> >+ lea r5, [r1 * 3]
> >+ sub r0, r5
> >+ mov r4d, 2
> >+
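(For reference: r4m is the luma filter index, and each filter's four 16-byte coefficient rows occupy 64 bytes in tab_LumaCoeffVer, so "shl r4d, 6" converts the index into a byte offset; a hypothetical index of 3, for example, selects offset 3 * 64 = 192.)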
> >+.loop:
> >+ movq xm1, [r0] ; m1 = row 0
> >+ movq xm2, [r0 + r1] ; m2 = row 1
> >+ punpcklbw xm1, xm2 ; m1 = [17 07 16 06 15 05 14 04 13 03 12 02 11 01 10 00]
> >+ movq xm3, [r0 + r1 * 2] ; m3 = row 2
> >+ punpcklbw xm2, xm3 ; m2 = [27 17 26 16 25 15 24 14 23 13 22 12 21 11 20 10]
> >+ vinserti128 m5, m1, xm2, 1 ; m5 = [27 17 26 16 25 15 24 14 23 13 22 12 21 11 20 10] - [17 07 16 06 15 05 14 04 13 03 12 02 11 01 10 00]
> >+ movq xm4, [r0 + r5] ; m4 = row 3
> >+ punpcklbw xm3, xm4 ; m3 = [37 27 36 26 35 25 34 24 33 23 32 22 31 21 30 20]
> >+ lea r6, [r0 + r1 * 4]
> >+ movq xm1, [r6] ; m1 = row 4
> >+ punpcklbw xm4, xm1 ; m4 = [47 37 46 36 45 35 44 34 43 33 42 32 41 31 40 30]
> >+ vinserti128 m6, m3, xm4, 1 ; m6 = [47 37 46 36 45 35 44 34 43 33 42 32 41 31 40 30] - [37 27 36 26 35 25 34 24 33 23 32 22 31 21 30 20]
> >+ movq xm2, [r6 + r1] ; m2 = row 5
> >+ punpcklbw xm1, xm2 ; m1 = [57 47 56 46 55 45 54 44 53 43 52 42 51 41 50 40]
> >+ movq xm4, [r6 + r1 * 2] ; m4 = row 6
> >+ punpcklbw xm2, xm4 ; m2 = [67 57 66 56 65 55 64 54 63 53 62 52 61 51 60 50]
> >+ vinserti128 m1, m1, xm2, 1 ; m1 = [67 57 66 56 65 55 64 54 63 53 62 52 61 51 60 50] - [57 47 56 46 55 45 54 44 53 43 52 42 51 41 50 40]
> >+ movq xm3, [r6 + r5] ; m3 = row 7
> >+ punpcklbw xm4, xm3 ; m4 = [77 67 76 66 75 65 74 64 73 63 72 62 71 61 70 60]
> >+ lea r6, [r6 + r1 * 4]
> >+ movq xm2, [r6] ; m2 = row 8
> >+ punpcklbw xm3, xm2 ; m3 = [87 77 86 76 85 75 84 74 83 73 82 72 81 71 80 70]
> >+ vinserti128 m4, m4, xm3, 1 ; m4 = [87 77 86 76 85 75 84 74 83 73 82 72 81 71 80 70] - [77 67 76 66 75 65 74 64 73 63 72 62 71 61 70 60]
> >+ movq xm3, [r6 + r1] ; m3 = row 9
> >+ punpcklbw xm2, xm3 ; m2 = [97 87 96 86 95 85 94 84 93 83 92 82 91 81 90 80]
> >+ movq xm10, [r6 + r1 * 2] ; m10 = row 10
> >+ punpcklbw xm3, xm10 ; m3 = [A7 97 A6 96 A5 95 A4 94 A3 93 A2 92 A1 91 A0 90]
> >+ vinserti128 m2, m2, xm3, 1 ; m2 = [A7 97 A6 96 A5 95 A4 94 A3 93 A2 92 A1 91 A0 90] - [97 87 96 86 95 85 94 84 93 83 92 82 91 81 90 80]
> >+
> >+ pmaddubsw m5, m0
> >+ pmaddubsw m3, m6, m7
> >+ paddw m5, m3
> >+ pmaddubsw m6, m0
> >+ pmaddubsw m3, m1, m8
> >+ pmaddubsw m1, m7
> >+ paddw m6, m1
> >+ pmaddubsw m1, m4, m9
> >+ paddw m3, m1
> >+ paddw m5, m3
> >+ pmaddubsw m4, m8
> >+ pmaddubsw m2, m9
> >+ paddw m4, m2
> >+ paddw m6, m4
> >+ pmulhrsw m5, [pw_512] ; m5 = word: row 0, row 1
> >+ pmulhrsw m6, [pw_512] ; m6 = word: row 2, row 3
>
> you have many free registers, why not buffer this constant in a register instead of reloading it inside the loop?
>
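One way to take that suggestion, sketched as an untested variant that assumes the xmm count in cglobal is raised from 11 to 12 so m11 becomes available:

    ; hoist the rounding constant out of the loop so [pw_512] is
    ; read from memory once instead of twice per iteration
    mova m11, [pw_512]
.loop:
    ...
    pmulhrsw m5, m11 ; m5 = word: row 0, row 1
    pmulhrsw m6, m11 ; m6 = word: row 2, row 3

As an aside, multiplying by pw_512 with pmulhrsw computes ((x * 512 * 2) + 0x8000) >> 16 = (x + 32) >> 6, i.e. the rounded right-shift by 6 that normalizes 8-tap coefficients summing to 64.
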
> >+ packuswb m5, m6
> >+ vextracti128 xm6, m5, 1
> >+ movq [r2], xm5
> >+ movq [r2 + r3], xm6
> >+ lea r2, [r2 + r3 * 2]
> >+ movhps [r2], xm5
> >+ movhps [r2 + r3], xm6
> >+
> >+ lea r2, [r2 + r3 * 2]
> >+ lea r0, [r0 + r1 * 4]
> >+ dec r4d
> >+ jnz .loop
> >+ RET
> >+%endif
> >+
>
> For your algorithm, you load all of the data first and process it later, which needs more registers. You can try the pattern below:
>
> load 0
> load 1
> generate [1 0]
> load 2
> generate [2 1]
> load 3
> generate [3 2]
> filter [1 0]
> filter [3 2]
> sum [0+1+2+3]
> ...
>
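A rough sketch of that interleaved schedule for the first four rows, with illustrative register names rather than a drop-in replacement (the real code would still pack two row pairs per ymm register with vinserti128):

    movq xm1, [r0]          ; load row 0
    movq xm2, [r0 + r1]     ; load row 1
    punpcklbw xm1, xm2      ; generate [1 0]
    movq xm3, [r0 + r1 * 2] ; load row 2
    punpcklbw xm2, xm3      ; generate [2 1]
    movq xm4, [r0 + r5]     ; load row 3
    punpcklbw xm3, xm4      ; generate [3 2]
    pmaddubsw xm1, xm0      ; filter [1 0] with taps 0-1
    pmaddubsw xm3, xm7      ; filter [3 2] with taps 2-3
    paddw xm1, xm3          ; partial sum [0+1+2+3] for row 0

Because each byte pair is consumed soon after it is built, fewer rows stay live at once and register pressure drops.
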
> _______________________________________________
> x265-devel mailing list
> x265-devel at videolan.org
> https://mailman.videolan.org/listinfo/x265-devel