[x265] [PATCH] asm: luma_vpp[16x32, 16x64] in avx2: improve 3875c->2488c, 7499c->4915c
Divya Manivannan
divya at multicorewareinc.com
Thu Nov 20 12:14:46 CET 2014
Please ignore this patch; I will clean it up and resend it.
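
For anyone reviewing the resend: the new routines are intended to compute the
same result as the plain C reference below (a minimal sketch with illustrative
names, not the exact x265 source; the taps are the standard HEVC 8-tap luma
interpolation filters). The pmulhrsw against pw_512 in the asm performs the
same (sum + 32) >> 6 rounding used here.

#include <stdint.h>

static const int8_t lumaFilter[4][8] =
{
    {  0, 0,   0, 64,  0,   0, 0,  0 },
    { -1, 4, -10, 58, 17,  -5, 1,  0 },
    { -1, 4, -11, 40, 40, -11, 4, -1 },
    {  0, 1,  -5, 17, 58, -10, 4, -1 }
};

/* reference vertical 8-tap pixel-to-pixel filter for 8-bit input */
static void interp_8tap_vert_pp_ref(const uint8_t *src, intptr_t srcStride,
                                    uint8_t *dst, intptr_t dstStride,
                                    int width, int height, int coeffIdx)
{
    const int8_t *coeff = lumaFilter[coeffIdx];
    int x, y, t, sum;

    src -= 3 * srcStride;              /* filter window starts 3 rows above */
    for (y = 0; y < height; y++)
    {
        for (x = 0; x < width; x++)
        {
            sum = 0;
            for (t = 0; t < 8; t++)
                sum += coeff[t] * src[x + t * srcStride];
            sum = (sum + 32) >> 6;     /* taps sum to 64: round and normalize */
            dst[x] = (uint8_t)(sum < 0 ? 0 : (sum > 255 ? 255 : sum));
        }
        src += srcStride;
        dst += dstStride;
    }
}

The 16x32 and 16x64 primitives are this loop with width = 16 and height = 32
or 64.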
Regards,
Divya
On Thu, Nov 20, 2014 at 3:19 PM, Praveen Tiwari <praveen at multicorewareinc.com> wrote:
> A table named tab_LumaCoeffVer_32 is already defined in this file; redefining
> it here will cause a build error. Please verify and update the patch.
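> If its contents match the existing definition, the new routines can simply
> reference the existing table instead of redefining it.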
>
> On Thu, Nov 20, 2014 at 2:49 PM, Divya Manivannan <divya at multicorewareinc.com> wrote:
>
>> # HG changeset patch
>> # User Divya Manivannan <divya at multicorewareinc.com>
>> # Date 1416475133 -19800
>> # Thu Nov 20 14:48:53 2014 +0530
>> # Node ID 49c99a85531358e1b0624edd8082b6945d4e187e
>> # Parent 3649fabf90d348c51d7e155989d1bf629ec27f6e
>> asm: luma_vpp[16x32, 16x64] in avx2: improve 3875c->2488c, 7499c->4915c
>>
>> diff -r 3649fabf90d3 -r 49c99a855313 source/common/x86/asm-primitives.cpp
>> --- a/source/common/x86/asm-primitives.cpp Thu Nov 20 14:27:53 2014 +0530
>> +++ b/source/common/x86/asm-primitives.cpp Thu Nov 20 14:48:53 2014 +0530
>> @@ -1798,6 +1798,8 @@
>> p.transpose[BLOCK_16x16] = x265_transpose16_avx2;
>> p.transpose[BLOCK_32x32] = x265_transpose32_avx2;
>> p.transpose[BLOCK_64x64] = x265_transpose64_avx2;
>> + p.luma_vpp[LUMA_16x32] = x265_interp_8tap_vert_pp_16x32_avx2;
>> + p.luma_vpp[LUMA_16x64] = x265_interp_8tap_vert_pp_16x64_avx2;
>> #endif
>> p.luma_hpp[LUMA_4x4] = x265_interp_8tap_horiz_pp_4x4_avx2;
>> p.luma_vpp[LUMA_4x4] = x265_interp_8tap_vert_pp_4x4_avx2;
>> diff -r 3649fabf90d3 -r 49c99a855313 source/common/x86/ipfilter8.asm
>> --- a/source/common/x86/ipfilter8.asm Thu Nov 20 14:27:53 2014 +0530
>> +++ b/source/common/x86/ipfilter8.asm Thu Nov 20 14:48:53 2014 +0530
>> @@ -122,6 +122,27 @@
>> times 8 db 58, -10
>> times 8 db 4, -1
>>
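>> +; HEVC 8-tap luma interpolation taps: each 'times 16 db a, b' row is one
>> +; mmsize (32-byte) block holding the tap pair (a, b) for pmaddubsw;
>> +; four rows per coeffIdx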
>> +ALIGN 32
>> +tab_LumaCoeffVer_32: times 16 db 0, 0
>> + times 16 db 0, 64
>> + times 16 db 0, 0
>> + times 16 db 0, 0
>> +
>> + times 16 db -1, 4
>> + times 16 db -10, 58
>> + times 16 db 17, -5
>> + times 16 db 1, 0
>> +
>> + times 16 db -1, 4
>> + times 16 db -11, 40
>> + times 16 db 40, -11
>> + times 16 db 4, -1
>> +
>> + times 16 db 0, 1
>> + times 16 db -5, 17
>> + times 16 db 58, -10
>> + times 16 db 4, -1
>> +
>> tab_c_64_n64: times 8 db 64, -64
>>
>> const interp4_shuf, times 2 db 0, 1, 8, 9, 4, 5, 12, 13, 2, 3, 10, 11, 6, 7, 14, 15
>> @@ -3755,6 +3776,312 @@
>>
>> ;-------------------------------------------------------------------------------------------------------------
>> FILTER_VER_LUMA_12xN 12, 16, ps
>>
>> +%macro FILTER_VER_LUMA_AVX2_16xN 2
>> +INIT_YMM avx2
>> +%if ARCH_X86_64 == 1
>> +cglobal interp_8tap_vert_pp_%1x%2, 4, 7, 15
>> + mov r4d, r4m
>> + shl r4d, 7
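>> +    ; r4d = coeffIdx * 128, the byte offset of the selected tap block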
>> +
>> +%ifdef PIC
>> + lea r5, [tab_LumaCoeffVer_32]
>> + add r5, r4
>> +%else
>> + lea r5, [tab_LumaCoeffVer_32 + r4]
>> +%endif
>> +
>> + lea r4, [r1 * 3]
>> + sub r0, r4
>> + lea r6, [r1 * 4]
>> + mova m14, [pw_512]
>> + mov word [rsp], %2 / 16
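>> +    ; outer loop count kept on the stack: each .loop iteration emits 16 rows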
>> +
>> +.loop:
>> + movu xm0, [r0] ; m0 = row 0
>> + movu xm1, [r0 + r1] ; m1 = row 1
>> + punpckhbw xm2, xm0, xm1
>> + punpcklbw xm0, xm1
>> + vinserti128 m0, m0, xm2, 1
>> + pmaddubsw m0, [r5]
>> + movu xm2, [r0 + r1 * 2] ; m2 = row 2
>> + punpckhbw xm3, xm1, xm2
>> + punpcklbw xm1, xm2
>> + vinserti128 m1, m1, xm3, 1
>> + pmaddubsw m1, [r5]
>> + movu xm3, [r0 + r4] ; m3 = row 3
>> + punpckhbw xm4, xm2, xm3
>> + punpcklbw xm2, xm3
>> + vinserti128 m2, m2, xm4, 1
>> + pmaddubsw m4, m2, [r5 + 1 * mmsize]
>> + paddw m0, m4
>> + pmaddubsw m2, [r5]
>> + lea r0, [r0 + r1 * 4]
>> + movu xm4, [r0] ; m4 = row 4
>> + punpckhbw xm5, xm3, xm4
>> + punpcklbw xm3, xm4
>> + vinserti128 m3, m3, xm5, 1
>> + pmaddubsw m5, m3, [r5 + 1 * mmsize]
>> + paddw m1, m5
>> + pmaddubsw m3, [r5]
>> + movu xm5, [r0 + r1] ; m5 = row 5
>> + punpckhbw xm6, xm4, xm5
>> + punpcklbw xm4, xm5
>> + vinserti128 m4, m4, xm6, 1
>> + pmaddubsw m6, m4, [r5 + 2 * mmsize]
>> + paddw m0, m6
>> + pmaddubsw m6, m4, [r5 + 1 * mmsize]
>> + paddw m2, m6
>> + pmaddubsw m4, [r5]
>> + movu xm6, [r0 + r1 * 2] ; m6 = row 6
>> + punpckhbw xm7, xm5, xm6
>> + punpcklbw xm5, xm6
>> + vinserti128 m5, m5, xm7, 1
>> + pmaddubsw m7, m5, [r5 + 2 * mmsize]
>> + paddw m1, m7
>> + pmaddubsw m7, m5, [r5 + 1 * mmsize]
>> + paddw m3, m7
>> + pmaddubsw m5, [r5]
>> + movu xm7, [r0 + r4] ; m7 = row 7
>> + punpckhbw xm8, xm6, xm7
>> + punpcklbw xm6, xm7
>> + vinserti128 m6, m6, xm8, 1
>> + pmaddubsw m8, m6, [r5 + 3 * mmsize]
>> + paddw m0, m8
>> + pmaddubsw m8, m6, [r5 + 2 * mmsize]
>> + paddw m2, m8
>> + pmaddubsw m8, m6, [r5 + 1 * mmsize]
>> + paddw m4, m8
>> + pmaddubsw m6, [r5]
>> + lea r0, [r0 + r1 * 4]
>> + movu xm8, [r0] ; m8 = row 8
>> + punpckhbw xm9, xm7, xm8
>> + punpcklbw xm7, xm8
>> + vinserti128 m7, m7, xm9, 1
>> + pmaddubsw m9, m7, [r5 + 3 * mmsize]
>> + paddw m1, m9
>> + pmaddubsw m9, m7, [r5 + 2 * mmsize]
>> + paddw m3, m9
>> + pmaddubsw m9, m7, [r5 + 1 * mmsize]
>> + paddw m5, m9
>> + pmaddubsw m7, [r5]
>> + movu xm9, [r0 + r1] ; m9 = row 9
>> + punpckhbw xm10, xm8, xm9
>> + punpcklbw xm8, xm9
>> + vinserti128 m8, m8, xm10, 1
>> + pmaddubsw m10, m8, [r5 + 3 * mmsize]
>> + paddw m2, m10
>> + pmaddubsw m10, m8, [r5 + 2 * mmsize]
>> + paddw m4, m10
>> + pmaddubsw m10, m8, [r5 + 1 * mmsize]
>> + paddw m6, m10
>> + pmaddubsw m8, [r5]
>> + movu xm10, [r0 + r1 * 2] ; m10 = row 10
>> + punpckhbw xm11, xm9, xm10
>> + punpcklbw xm9, xm10
>> + vinserti128 m9, m9, xm11, 1
>> + pmaddubsw m11, m9, [r5 + 3 * mmsize]
>> + paddw m3, m11
>> + pmaddubsw m11, m9, [r5 + 2 * mmsize]
>> + paddw m5, m11
>> + pmaddubsw m11, m9, [r5 + 1 * mmsize]
>> + paddw m7, m11
>> + pmaddubsw m9, [r5]
>> + movu xm11, [r0 + r4] ; m11 = row 11
>> + punpckhbw xm12, xm10, xm11
>> + punpcklbw xm10, xm11
>> + vinserti128 m10, m10, xm12, 1
>> + pmaddubsw m12, m10, [r5 + 3 * mmsize]
>> + paddw m4, m12
>> + pmaddubsw m12, m10, [r5 + 2 * mmsize]
>> + paddw m6, m12
>> + pmaddubsw m12, m10, [r5 + 1 * mmsize]
>> + paddw m8, m12
>> + pmaddubsw m10, [r5]
>> + lea r0, [r0 + r1 * 4]
>> + movu xm12, [r0] ; m12 = row 12
>> + punpckhbw xm13, xm11, xm12
>> + punpcklbw xm11, xm12
>> + vinserti128 m11, m11, xm13, 1
>> + pmaddubsw m13, m11, [r5 + 3 * mmsize]
>> + paddw m5, m13
>> + pmaddubsw m13, m11, [r5 + 2 * mmsize]
>> + paddw m7, m13
>> + pmaddubsw m13, m11, [r5 + 1 * mmsize]
>> + paddw m9, m13
>> + pmaddubsw m11, [r5]
>> +
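>> +    ; pmulhrsw with pw_512 == (sum + 32) >> 6, the rounding shift for taps summing to 64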
>> + pmulhrsw m0, m14 ; m0 = word: row 0
>> + pmulhrsw m1, m14 ; m1 = word: row 1
>> + pmulhrsw m2, m14 ; m2 = word: row 2
>> + pmulhrsw m3, m14 ; m3 = word: row 3
>> + pmulhrsw m4, m14 ; m4 = word: row 4
>> + pmulhrsw m5, m14 ; m5 = word: row 5
>> + packuswb m0, m1
>> + packuswb m2, m3
>> + packuswb m4, m5
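>> +    ; packuswb pairs two rows across 128-bit lanes; vpermq 0xD8 regroups
>> +    ; qwords so each xmm half holds one complete 16-pixel row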
>> + vpermq m0, m0, 11011000b
>> + vpermq m2, m2, 11011000b
>> + vpermq m4, m4, 11011000b
>> + vextracti128 xm1, m0, 1
>> + vextracti128 xm3, m2, 1
>> + vextracti128 xm5, m4, 1
>> + movu [r2], xm0
>> + movu [r2 + r3], xm1
>> + lea r2, [r2 + r3 * 2]
>> + movu [r2], xm2
>> + movu [r2 + r3], xm3
>> + lea r2, [r2 + r3 * 2]
>> + movu [r2], xm4
>> + movu [r2 + r3], xm5
>> + lea r2, [r2 + r3 * 2]
>> +
>> + movu xm13, [r0 + r1] ; m13 = row 13
>> + punpckhbw xm0, xm12, xm13
>> + punpcklbw xm12, xm13
>> + vinserti128 m12, m12, xm0, 1
>> + pmaddubsw m0, m12, [r5 + 3 * mmsize]
>> + paddw m6, m0
>> + pmaddubsw m0, m12, [r5 + 2 * mmsize]
>> + paddw m8, m0
>> + pmaddubsw m0, m12, [r5 + 1 * mmsize]
>> + paddw m10, m0
>> + pmaddubsw m12, [r5]
>> + movu xm0, [r0 + r1 * 2] ; m0 = row 14
>> + punpckhbw xm1, xm13, xm0
>> + punpcklbw xm13, xm0
>> + vinserti128 m13, m13, xm1, 1
>> + pmaddubsw m1, m13, [r5 + 3 * mmsize]
>> + paddw m7, m1
>> + pmaddubsw m1, m13, [r5 + 2 * mmsize]
>> + paddw m9, m1
>> + pmaddubsw m1, m13, [r5 + 1 * mmsize]
>> + paddw m11, m1
>> + pmaddubsw m13, [r5]
>> +
>> + pmulhrsw m6, m14 ; m6 = word: row 6
>> + pmulhrsw m7, m14 ; m7 = word: row 7
>> + packuswb m6, m7
>> + vpermq m6, m6, 11011000b
>> + vextracti128 xm7, m6, 1
>> + movu [r2], xm6
>> + movu [r2 + r3], xm7
>> + lea r2, [r2 + r3 * 2]
>> +
>> + movu xm1, [r0 + r4] ; m1 = row 15
>> + punpckhbw xm2, xm0, xm1
>> + punpcklbw xm0, xm1
>> + vinserti128 m0, m0, xm2, 1
>> + pmaddubsw m2, m0, [r5 + 3 * mmsize]
>> + paddw m8, m2
>> + pmaddubsw m2, m0, [r5 + 2 * mmsize]
>> + paddw m10, m2
>> + pmaddubsw m2, m0, [r5 + 1 * mmsize]
>> + paddw m12, m2
>> + pmaddubsw m0, [r5]
>> + lea r0, [r0 + r1 * 4]
>> + movu xm2, [r0] ; m2 = row 16
>> + punpckhbw xm3, xm1, xm2
>> + punpcklbw xm1, xm2
>> + vinserti128 m1, m1, xm3, 1
>> + pmaddubsw m3, m1, [r5 + 3 * mmsize]
>> + paddw m9, m3
>> + pmaddubsw m3, m1, [r5 + 2 * mmsize]
>> + paddw m11, m3
>> + pmaddubsw m3, m1, [r5 + 1 * mmsize]
>> + paddw m13, m3
>> + pmaddubsw m1, [r5]
>> + movu xm3, [r0 + r1] ; m3 = row 17
>> + punpckhbw xm4, xm2, xm3
>> + punpcklbw xm2, xm3
>> + vinserti128 m2, m2, xm4, 1
>> + pmaddubsw m4, m2, [r5 + 3 * mmsize]
>> + paddw m10, m4
>> + pmaddubsw m4, m2, [r5 + 2 * mmsize]
>> + paddw m12, m4
>> + pmaddubsw m2, [r5 + 1 * mmsize]
>> + paddw m0, m2
>> + movu xm4, [r0 + r1 * 2] ; m4 = row 18
>> + punpckhbw xm5, xm3, xm4
>> + punpcklbw xm3, xm4
>> + vinserti128 m3, m3, xm5, 1
>> + pmaddubsw m5, m3, [r5 + 3 * mmsize]
>> + paddw m11, m5
>> + pmaddubsw m5, m3, [r5 + 2 * mmsize]
>> + paddw m13, m5
>> + pmaddubsw m3, [r5 + 1 * mmsize]
>> + paddw m1, m3
>> + movu xm5, [r0 + r4] ; m5 = row 19
>> + punpckhbw xm6, xm4, xm5
>> + punpcklbw xm4, xm5
>> + vinserti128 m4, m4, xm6, 1
>> + pmaddubsw m6, m4, [r5 + 3 * mmsize]
>> + paddw m12, m6
>> + pmaddubsw m4, [r5 + 2 * mmsize]
>> + paddw m0, m4
>> + lea r0, [r0 + r1 * 4]
>> + movu xm6, [r0] ; m6 = row 20
>> + punpckhbw xm7, xm5, xm6
>> + punpcklbw xm5, xm6
>> + vinserti128 m5, m5, xm7, 1
>> + pmaddubsw m7, m5, [r5 + 3 * mmsize]
>> + paddw m13, m7
>> + pmaddubsw m5, [r5 + 2 * mmsize]
>> + paddw m1, m5
>> + movu xm7, [r0 + r1] ; m7 = row 21
>> + punpckhbw xm2, xm6, xm7
>> + punpcklbw xm6, xm7
>> + vinserti128 m6, m6, xm2, 1
>> + pmaddubsw m6, [r5 + 3 * mmsize]
>> + paddw m0, m6
>> + movu xm2, [r0 + r1 * 2] ; m2 = row 22
>> + punpckhbw xm3, xm7, xm2
>> + punpcklbw xm7, xm2
>> + vinserti128 m7, m7, xm3, 1
>> + pmaddubsw m7, [r5 + 3 * mmsize]
>> + paddw m1, m7
>> +
>> + pmulhrsw m8, m14 ; m8 = word: row 8
>> + pmulhrsw m9, m14 ; m9 = word: row 9
>> + pmulhrsw m10, m14 ; m10 = word: row 10
>> + pmulhrsw m11, m14 ; m11 = word: row 11
>> + pmulhrsw m12, m14 ; m12 = word: row 12
>> + pmulhrsw m13, m14 ; m13 = word: row 13
>> + pmulhrsw m0, m14 ; m0 = word: row 14
>> + pmulhrsw m1, m14 ; m1 = word: row 15
>> + packuswb m8, m9
>> + packuswb m10, m11
>> + packuswb m12, m13
>> + packuswb m0, m1
>> + vpermq m8, m8, 11011000b
>> + vpermq m10, m10, 11011000b
>> + vpermq m12, m12, 11011000b
>> + vpermq m0, m0, 11011000b
>> + vextracti128 xm9, m8, 1
>> + vextracti128 xm11, m10, 1
>> + vextracti128 xm13, m12, 1
>> + vextracti128 xm1, m0, 1
>> + movu [r2], xm8
>> + movu [r2 + r3], xm9
>> + lea r2, [r2 + r3 * 2]
>> + movu [r2], xm10
>> + movu [r2 + r3], xm11
>> + lea r2, [r2 + r3 * 2]
>> + movu [r2], xm12
>> + movu [r2 + r3], xm13
>> + lea r2, [r2 + r3 * 2]
>> + movu [r2], xm0
>> + movu [r2 + r3], xm1
>> + lea r2, [r2 + r3 * 2]
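>> +    ; the loop body advanced r0 by 20 rows; step back 4 (r6 = 4 * r1) so
>> +    ; the next 16-row block starts at the correct source row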
>> + sub r0, r6
>> + dec word [rsp]
>> + jnz .loop
>> + RET
>> +%endif
>> +%endmacro
>> +
>> +FILTER_VER_LUMA_AVX2_16xN 16, 32
>> +FILTER_VER_LUMA_AVX2_16xN 16, 64
>> +
>>
>> ;-------------------------------------------------------------------------------------------------------------
>> ; void interp_8tap_vert_%3_%1x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
>> ;-------------------------------------------------------------------------------------------------------------