[x265] [PATCH] blockcopy_pp: 32x8, 32x16, 32x24, 32x32, 32x48, 32x64 AVX version of asm code, approx double speedup compared to SSE
chen
chenm003 at 163.com
Fri Sep 19 22:39:24 CEST 2014
In this code you don't need m6 and m7; because they are callee-saved XMM registers on Win64, using them forces save and restore code on the stack.
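For example (a sketch only, untested): the same 8-rows-per-iteration loop can reuse m0-m3 twice, so cglobal declares just 4 vector registers and x86inc emits no xmm6/xmm7 spill code on Win64:

%macro BLOCKCOPY_PP_W32_H8_avx 2
INIT_YMM avx
cglobal blockcopy_pp_%1x%2, 4, 7, 4
    mov     r4d, %2/8
    lea     r5, [3 * r3]
    lea     r6, [3 * r1]

.loop:
    ; first 4 rows
    movu    m0, [r2]
    movu    m1, [r2 + r3]
    movu    m2, [r2 + 2 * r3]
    movu    m3, [r2 + r5]
    movu    [r0], m0
    movu    [r0 + r1], m1
    movu    [r0 + 2 * r1], m2
    movu    [r0 + r6], m3
    lea     r2, [r2 + 4 * r3]
    lea     r0, [r0 + 4 * r1]

    ; next 4 rows, reusing the same registers
    movu    m0, [r2]
    movu    m1, [r2 + r3]
    movu    m2, [r2 + 2 * r3]
    movu    m3, [r2 + r5]
    movu    [r0], m0
    movu    [r0 + r1], m1
    movu    [r0 + 2 * r1], m2
    movu    [r0 + r6], m3
    lea     r2, [r2 + 4 * r3]
    lea     r0, [r0 + 4 * r1]

    dec     r4d
    jnz     .loop
    RET
%endmacro

On SysV ABIs the XMM registers are caller-saved, so no spill code is emitted either way; the saving only shows up on Win64.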
At 2014-09-19 17:31:40,praveen at multicorewareinc.com wrote:
># HG changeset patch
># User Praveen Tiwari
># Date 1411119080 -19800
># Node ID e441b1fe3f7870f4aa066680f7a55e547f13dfb8
># Parent 4680ab4f92b8cc809b1e8dbc927126ec70bcc5c5
>blockcopy_pp: 32x8, 32x16, 32x24, 32x32, 32x48, 32x64 AVX version of asm code, approx double speedup compared to SSE
>
>diff -r 4680ab4f92b8 -r e441b1fe3f78 source/common/x86/asm-primitives.cpp
>--- a/source/common/x86/asm-primitives.cpp Thu Sep 18 18:16:25 2014 +0530
>+++ b/source/common/x86/asm-primitives.cpp Fri Sep 19 15:01:20 2014 +0530
>@@ -1709,6 +1709,24 @@
>
> p.ssim_4x4x2_core = x265_pixel_ssim_4x4x2_core_avx;
> p.ssim_end_4 = x265_pixel_ssim_end4_avx;
>+
>+ /* Will be replaced with macro calls once all the partitions are coded */
>+
>+ p.chroma[X265_CSP_I420].copy_pp[CHROMA_32x8] = x265_blockcopy_pp_32x8_avx;
>+ p.chroma[X265_CSP_I420].copy_pp[CHROMA_32x16] = x265_blockcopy_pp_32x16_avx;
>+ p.chroma[X265_CSP_I420].copy_pp[CHROMA_32x24] = x265_blockcopy_pp_32x24_avx;
>+ p.chroma[X265_CSP_I420].copy_pp[CHROMA_32x32] = x265_blockcopy_pp_32x32_avx;
>+
>+ p.chroma[X265_CSP_I422].copy_pp[CHROMA422_32x16] = x265_blockcopy_pp_32x16_avx;
>+ p.chroma[X265_CSP_I422].copy_pp[CHROMA422_32x32] = x265_blockcopy_pp_32x32_avx;
>+ p.chroma[X265_CSP_I422].copy_pp[CHROMA422_32x48] = x265_blockcopy_pp_32x48_avx;
>+ p.chroma[X265_CSP_I422].copy_pp[CHROMA422_32x64] = x265_blockcopy_pp_32x64_avx;
>+
>+ p.luma_copy_pp[LUMA_32x8] = x265_blockcopy_pp_32x8_avx;
>+ p.luma_copy_pp[LUMA_32x16] = x265_blockcopy_pp_32x16_avx;
>+ p.luma_copy_pp[LUMA_32x24] = x265_blockcopy_pp_32x24_avx;
>+ p.luma_copy_pp[LUMA_32x32] = x265_blockcopy_pp_32x32_avx;
>+ p.luma_copy_pp[LUMA_32x64] = x265_blockcopy_pp_32x64_avx;
> }
> if (cpuMask & X265_CPU_XOP)
> {
>diff -r 4680ab4f92b8 -r e441b1fe3f78 source/common/x86/blockcopy8.asm
>--- a/source/common/x86/blockcopy8.asm Thu Sep 18 18:16:25 2014 +0530
>+++ b/source/common/x86/blockcopy8.asm Fri Sep 19 15:01:20 2014 +0530
>@@ -605,6 +605,48 @@
>
> BLOCKCOPY_PP_W32_H4 32, 48
>
>+%macro BLOCKCOPY_PP_W32_H8_avx 2
>+INIT_YMM avx
>+cglobal blockcopy_pp_%1x%2, 4, 7, 8
>+ mov r4d, %2/8
>+ lea r5, [3 * r3]
>+ lea r6, [3 * r1]
>+
>+.loop:
>+ movu m0, [r2]
>+ movu m1, [r2 + r3]
>+ movu m2, [r2 + 2 * r3]
>+ movu m3, [r2 + r5]
>+ lea r2, [r2 + 4 * r3]
>+ movu m4, [r2]
>+ movu m5, [r2 + r3]
>+ movu m6, [r2 + 2 * r3]
>+ movu m7, [r2 + r5]
>+
>+ movu [r0], m0
>+ movu [r0 + r1], m1
>+ movu [r0 + 2 * r1], m2
>+ movu [r0 + r6], m3
>+ lea r0, [r0 + 4 * r1]
>+ movu [r0], m4
>+ movu [r0 + r1], m5
>+ movu [r0 + 2 * r1], m6
>+ movu [r0 + r6], m7
>+
>+ dec r4d
>+ lea r0, [r0 + 4 * r1]
>+ lea r2, [r2 + 4 * r3]
>+ jnz .loop
>+ RET
>+%endmacro
>+
>+BLOCKCOPY_PP_W32_H8_avx 32, 8
>+BLOCKCOPY_PP_W32_H8_avx 32, 16
>+BLOCKCOPY_PP_W32_H8_avx 32, 24
>+BLOCKCOPY_PP_W32_H8_avx 32, 32
>+BLOCKCOPY_PP_W32_H8_avx 32, 48
>+BLOCKCOPY_PP_W32_H8_avx 32, 64
>+
> ;-----------------------------------------------------------------------------
> ; void blockcopy_pp_%1x%2(pixel *dest, intptr_t deststride, pixel *src, intptr_t srcstride)
> ;-----------------------------------------------------------------------------
>diff -r 4680ab4f92b8 -r e441b1fe3f78 source/common/x86/blockcopy8.h
>--- a/source/common/x86/blockcopy8.h Thu Sep 18 18:16:25 2014 +0530
>+++ b/source/common/x86/blockcopy8.h Fri Sep 19 15:01:20 2014 +0530
>@@ -183,6 +183,13 @@
> void x265_blockfill_s_16x16_sse2(int16_t *dst, intptr_t dstride, int16_t val);
> void x265_blockfill_s_32x32_sse2(int16_t *dst, intptr_t dstride, int16_t val);
>
>+void x265_blockcopy_pp_32x8_avx(pixel * a, intptr_t stridea, pixel * b, intptr_t strideb);
>+void x265_blockcopy_pp_32x16_avx(pixel * a, intptr_t stridea, pixel * b, intptr_t strideb);
>+void x265_blockcopy_pp_32x24_avx(pixel * a, intptr_t stridea, pixel * b, intptr_t strideb);
>+void x265_blockcopy_pp_32x48_avx(pixel * a, intptr_t stridea, pixel * b, intptr_t strideb);
>+void x265_blockcopy_pp_32x32_avx(pixel * a, intptr_t stridea, pixel * b, intptr_t strideb);
>+void x265_blockcopy_pp_32x64_avx(pixel * a, intptr_t stridea, pixel * b, intptr_t strideb);
>+
> #undef BLOCKCOPY_COMMON
> #undef BLOCKCOPY_SS_PP
> #undef BLOCKCOPY_SP