[x265] [PATCH] asm: avx2 assembly code for 8bpp transpose16x16 module
Murugan Vairavel
murugan at multicorewareinc.com
Fri Oct 10 15:30:27 CEST 2014
Ignore this patch; I will send a new one with some improvements.
On Thu, Oct 9, 2014 at 5:20 PM, <murugan at multicorewareinc.com> wrote:
> # HG changeset patch
> # User Murugan Vairavel <murugan at multicorewareinc.com>
> # Date 1412855249 -19800
> # Thu Oct 09 17:17:29 2014 +0530
> # Node ID 0c36fca591aefdf1df620126e3e7d2ba327609cb
> # Parent 85203bb459124dc5eb4bc929450c655b196aeb0e
> asm: avx2 assembly code for 8bpp transpose16x16 module
>
> diff -r 85203bb45912 -r 0c36fca591ae source/common/x86/asm-primitives.cpp
> --- a/source/common/x86/asm-primitives.cpp Thu Oct 09 15:59:58 2014 +0530
> +++ b/source/common/x86/asm-primitives.cpp Thu Oct 09 17:17:29 2014 +0530
> @@ -1792,6 +1792,7 @@
> p.idct[IDCT_8x8] = x265_idct8_avx2;
> p.idct[IDCT_16x16] = x265_idct16_avx2;
> p.idct[IDCT_32x32] = x265_idct32_avx2;
> + p.transpose[BLOCK_16x16] = x265_transpose16_avx2;
> p.transpose[BLOCK_32x32] = x265_transpose32_avx2;
> #endif
> }
> diff -r 85203bb45912 -r 0c36fca591ae source/common/x86/pixel-util.h
> --- a/source/common/x86/pixel-util.h Thu Oct 09 15:59:58 2014 +0530
> +++ b/source/common/x86/pixel-util.h Thu Oct 09 17:17:29 2014 +0530
> @@ -44,6 +44,7 @@
> void x265_transpose32_sse2(pixel *dest, pixel *src, intptr_t stride);
> void x265_transpose64_sse2(pixel *dest, pixel *src, intptr_t stride);
>
> +void x265_transpose16_avx2(pixel *dest, pixel *src, intptr_t stride);
> void x265_transpose32_avx2(pixel *dest, pixel *src, intptr_t stride);
>
> uint32_t x265_quant_sse4(int32_t *coef, int32_t *quantCoeff, int32_t *deltaU, int16_t *qCoef, int qBits, int add, int numCoeff);
> diff -r 85203bb45912 -r 0c36fca591ae source/common/x86/pixel-util8.asm
> --- a/source/common/x86/pixel-util8.asm Thu Oct 09 15:59:58 2014 +0530
> +++ b/source/common/x86/pixel-util8.asm Thu Oct 09 17:17:29 2014 +0530
> @@ -1632,8 +1632,105 @@
> lea r0, [r6 + 8 * r5 + 16]
> mov r3, r0
> call transpose8_internal
> -
> + RET
> %else
> +%if ARCH_X86_64 == 1
> +INIT_YMM avx2
> +cglobal transpose16, 3, 5, 16
> + lea r3, [r2 * 3]
> + lea r4, [r1 + 8 * r2]
> +
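> +    ; pair row i (low lane) with row i + 8 (high lane) in each ymm
> +    ; register, so the byte/word/dword unpacks below stay inside
> +    ; their 128-bit lane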
> + movu xm0, [r1]
> + movu xm1, [r1 + r2]
> + movu xm2, [r1 + 2 * r2]
> + movu xm3, [r1 + r3]
> + vinserti128 m0, m0, [r4], 1
> + vinserti128 m1, m1, [r4 + r2], 1
> + vinserti128 m2, m2, [r4 + 2 * r2], 1
> + vinserti128 m3, m3, [r4 + r3], 1
> + lea r1, [r1 + 4 * r2]
> + lea r4, [r4 + 4 * r2]
> +
> + movu xm4, [r1]
> + movu xm5, [r1 + r2]
> + movu xm6, [r1 + 2 * r2]
> + movu xm7, [r1 + r3]
> + vinserti128 m4, m4, [r4], 1
> + vinserti128 m5, m5, [r4 + r2], 1
> + vinserti128 m6, m6, [r4 + 2 * r2], 1
> + vinserti128 m7, m7, [r4 + r3], 1
> +
> + punpcklbw m8, m0, m1 ;[1 - 8 ; 1 - 8 ][1 2 9 10]
> + punpckhbw m0, m1 ;[9 - 16; 9 - 16][1 2 9 10]
> +
> + punpcklbw m1, m2, m3 ;[1 - 8 ; 1 - 8 ][3 4 11 12]
> + punpckhbw m2, m3 ;[9 - 16; 9 - 16][3 4 11 12]
> +
> + punpcklbw m3, m4, m5 ;[1 - 8 ; 1 - 8 ][5 6 13 14]
> + punpckhbw m4, m5 ;[9 - 16; 9 - 16][5 6 13 14]
> +
> + punpcklbw m5, m6, m7 ;[1 - 8 ; 1 - 8 ][7 8 15 16]
> + punpckhbw m6, m7 ;[9 - 16; 9 - 16][7 8 15 16]
> +
> +    punpcklwd    m7, m8, m1    ;[1 - 4 ; 1 - 4][1 2 3 4 9 10 11 12]
> +    punpckhwd    m8, m1        ;[5 - 8 ; 5 - 8][1 2 3 4 9 10 11 12]
> +
> +    punpcklwd    m1, m3, m5    ;[1 - 4 ; 1 - 4][5 6 7 8 13 14 15 16]
> +    punpckhwd    m3, m5        ;[5 - 8 ; 5 - 8][5 6 7 8 13 14 15 16]
> +
> +    punpcklwd    m5, m0, m2    ;[9 - 12; 9 - 12][1 2 3 4 9 10 11 12]
> +    punpckhwd    m0, m2        ;[13- 16; 13 - 16][1 2 3 4 9 10 11 12]
> +
> +    punpcklwd    m2, m4, m6    ;[9 - 12; 9 - 12][5 6 7 8 13 14 15 16]
> +    punpckhwd    m4, m6        ;[13- 16; 13 - 16][5 6 7 8 13 14 15 16]
> +
> +    punpckldq    m6, m7, m1    ;[1 - 2 ; 1 - 2][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
> +    punpckhdq    m7, m1        ;[3 - 4 ; 3 - 4][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
> +
> +    punpckldq    m1, m8, m3    ;[5 - 6 ; 5 - 6][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
> +    punpckhdq    m8, m3        ;[7 - 8 ; 7 - 8][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
> +
> +    punpckldq    m3, m5, m2    ;[9 - 10; 9 - 10][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
> +    punpckhdq    m5, m2        ;[11- 12; 11 - 12][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
> +
> +    punpckldq    m2, m0, m4    ;[13- 14; 13 - 14][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
> +    punpckhdq    m0, m4        ;[15- 16; 15 - 16][1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16]
> +
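> +    ; each register now holds two output rows with their halves split
> +    ; across lanes as [a.lo | b.lo | a.hi | b.hi]; 0xD8 (11011000b)
> +    ; reorders the qwords to [a.lo | a.hi | b.lo | b.hi], making each
> +    ; output row contiguous for the 16-byte stores below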
> + vpermq m6, m6, 0xD8
> + vpermq m7, m7, 0xD8
> + vpermq m1, m1, 0xD8
> + vpermq m8, m8, 0xD8
> + vpermq m3, m3, 0xD8
> + vpermq m5, m5, 0xD8
> + vpermq m2, m2, 0xD8
> + vpermq m0, m0, 0xD8
> +
> + movu [r0 + 0 * 16], xm6
> + vextracti128 [r0 + 1 * 16], m6, 1
> +
> + movu [r0 + 2 * 16], xm7
> + vextracti128 [r0 + 3 * 16], m7, 1
> +
> + movu [r0 + 4 * 16], xm1
> + vextracti128 [r0 + 5 * 16], m1, 1
> +
> + movu [r0 + 6 * 16], xm8
> + vextracti128 [r0 + 7 * 16], m8, 1
> +
> + movu [r0 + 8 * 16], xm3
> + vextracti128 [r0 + 9 * 16], m3, 1
> +
> + movu [r0 + 10 * 16], xm5
> + vextracti128 [r0 + 11 * 16], m5, 1
> +
> + movu [r0 + 12 * 16], xm2
> + vextracti128 [r0 + 13 * 16], m2, 1
> +
> + movu [r0 + 14 * 16], xm0
> + vextracti128 [r0 + 15 * 16], m0, 1
> + RET
> +%endif
> +INIT_XMM sse2
> cglobal transpose16, 3, 5, 8, dest, src, stride
> mov r3, r0
> mov r4, r1
> @@ -1647,8 +1744,8 @@
> lea r1, [r1 + 2 * r2]
> lea r0, [r3 + 8 * 16 + 8]
> TRANSPOSE_8x8 16
> + RET
> %endif
> - RET
>
> cglobal transpose16_internal
> TRANSPOSE_8x8 r6
> @@ -1761,6 +1858,7 @@
> mov r5, r0
> call transpose16_internal
> RET
> +
> %if ARCH_X86_64 == 1
> INIT_YMM avx2
> cglobal transpose32, 3, 5, 16
>
--
With Regards,
Murugan. V
+919659287478