[x265] [PATCH] blockcopy_ss: 64x16, 64x32, 64x48, 64x64 AVX version of asm code, approx double speedup compared to SSE
sagar at multicorewareinc.com
Tue Sep 23 15:05:01 CEST 2014
# HG changeset patch
# User Sagar Kotecha
# Date 1411477373 -19800
# Tue Sep 23 18:32:53 2014 +0530
# Node ID 063c3005a181b22fb2b947ab3779479a6d2fae14
# Parent 166ed7f88b9205eb585f88c7ab43ab02b29ace69
blockcopy_ss: 64x16, 64x32, 64x48, 64x64 AVX version of asm code, approx double speedup compared to SSE
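Each 64-wide row of int16_t coefficients is 128 bytes, which the 256-bit ymm registers cover in four unaligned loads and four stores per row, versus eight of each with 128-bit xmm registers; that halving of the load/store count is where the roughly 2x gain over SSE comes from. For reference, the operation these kernels implement is just a strided block copy (a minimal scalar sketch; the name blockcopy_ss_ref and the explicit block-size parameters are illustrative, and strides are in int16_t units as in the asm below):

    #include <cstdint>
    #include <cstring>

    // Minimal scalar sketch of blockcopy_ss for a bx-by-by block of int16_t.
    // Strides are in int16_t units; the asm below doubles them
    // (add r1, r1 / add r3, r3) to turn them into byte offsets.
    static void blockcopy_ss_ref(int16_t *dest, intptr_t deststride,
                                 const int16_t *src, intptr_t srcstride,
                                 int bx, int by)
    {
        for (int y = 0; y < by; y++)
        {
            memcpy(dest, src, bx * sizeof(int16_t));
            dest += deststride;
            src += srcstride;
        }
    }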
diff -r 166ed7f88b92 -r 063c3005a181 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp Tue Sep 23 12:47:02 2014 +0530
+++ b/source/common/x86/asm-primitives.cpp Tue Sep 23 18:32:53 2014 +0530
@@ -1709,6 +1709,10 @@
p.ssim_4x4x2_core = x265_pixel_ssim_4x4x2_core_avx;
p.ssim_end_4 = x265_pixel_ssim_end4_avx;
+ p.luma_copy_ss[LUMA_64x16] = x265_blockcopy_ss_64x16_avx;
+ p.luma_copy_ss[LUMA_64x32] = x265_blockcopy_ss_64x32_avx;
+ p.luma_copy_ss[LUMA_64x48] = x265_blockcopy_ss_64x48_avx;
+ p.luma_copy_ss[LUMA_64x64] = x265_blockcopy_ss_64x64_avx;
}
if (cpuMask & X265_CPU_XOP)
{
diff -r 166ed7f88b92 -r 063c3005a181 source/common/x86/blockcopy8.asm
--- a/source/common/x86/blockcopy8.asm Tue Sep 23 12:47:02 2014 +0530
+++ b/source/common/x86/blockcopy8.asm Tue Sep 23 18:32:53 2014 +0530
@@ -3317,6 +3317,69 @@
BLOCKCOPY_SS_W64_H4 64, 48
BLOCKCOPY_SS_W64_H4 64, 64
+;-----------------------------------------------------------------------------
+; void blockcopy_ss_%1x%2(int16_t *dest, intptr_t deststride, int16_t *src, intptr_t srcstride)
+;-----------------------------------------------------------------------------
+%macro BLOCKCOPY_SS_W64_H4_avx 2
+INIT_YMM avx
+cglobal blockcopy_ss_%1x%2, 4, 7, 4, dest, deststride, src, srcstride
+    mov     r4d, %2/4               ; loop counter: 4 rows per iteration
+    add     r1, r1                  ; strides arrive in int16_t units;
+    add     r3, r3                  ; double them to get byte strides
+    lea     r5, [3 * r1]            ; r5 = 3 * deststride, in bytes
+    lea     r6, [3 * r3]            ; r6 = 3 * srcstride, in bytes
+.loop:
+    movu    m0, [r2]                ; row 0: 128 bytes = 4 x 32-byte ymm
+    movu    m1, [r2 + 32]
+    movu    m2, [r2 + 64]
+    movu    m3, [r2 + 96]
+
+    movu    [r0], m0
+    movu    [r0 + 32], m1
+    movu    [r0 + 64], m2
+    movu    [r0 + 96], m3
+
+    movu    m0, [r2 + r3]           ; row 1
+    movu    m1, [r2 + r3 + 32]
+    movu    m2, [r2 + r3 + 64]
+    movu    m3, [r2 + r3 + 96]
+
+    movu    [r0 + r1], m0
+    movu    [r0 + r1 + 32], m1
+    movu    [r0 + r1 + 64], m2
+    movu    [r0 + r1 + 96], m3
+
+    movu    m0, [r2 + 2 * r3]       ; row 2
+    movu    m1, [r2 + 2 * r3 + 32]
+    movu    m2, [r2 + 2 * r3 + 64]
+    movu    m3, [r2 + 2 * r3 + 96]
+
+    movu    [r0 + 2 * r1], m0
+    movu    [r0 + 2 * r1 + 32], m1
+    movu    [r0 + 2 * r1 + 64], m2
+    movu    [r0 + 2 * r1 + 96], m3
+
+    movu    m0, [r2 + r6]           ; row 3
+    movu    m1, [r2 + r6 + 32]
+    movu    m2, [r2 + r6 + 64]
+    movu    m3, [r2 + r6 + 96]
+    lea     r2, [r2 + 4 * r3]       ; advance src by 4 rows
+
+    movu    [r0 + r5], m0
+    movu    [r0 + r5 + 32], m1
+    movu    [r0 + r5 + 64], m2
+    movu    [r0 + r5 + 96], m3
+    lea     r0, [r0 + 4 * r1]       ; advance dest by 4 rows
+
+    dec     r4d
+    jnz     .loop
+    RET
+%endmacro
+
+BLOCKCOPY_SS_W64_H4_avx 64, 16
+BLOCKCOPY_SS_W64_H4_avx 64, 32
+BLOCKCOPY_SS_W64_H4_avx 64, 48
+BLOCKCOPY_SS_W64_H4_avx 64, 64
;-----------------------------------------------------------------------------
; void cvt32to16_shr(short *dst, int *src, intptr_t stride, int shift, int size)
diff -r 166ed7f88b92 -r 063c3005a181 source/common/x86/blockcopy8.h
--- a/source/common/x86/blockcopy8.h Tue Sep 23 12:47:02 2014 +0530
+++ b/source/common/x86/blockcopy8.h Tue Sep 23 18:32:53 2014 +0530
@@ -189,6 +189,10 @@
void x265_blockcopy_ss_16x24_avx(int16_t *dest, intptr_t deststride, int16_t *src, intptr_t srcstride);
void x265_blockcopy_ss_16x32_avx(int16_t *dest, intptr_t deststride, int16_t *src, intptr_t srcstride);
void x265_blockcopy_ss_16x64_avx(int16_t *dest, intptr_t deststride, int16_t *src, intptr_t srcstride);
+void x265_blockcopy_ss_64x16_avx(int16_t *dest, intptr_t deststride, int16_t *src, intptr_t srcstride);
+void x265_blockcopy_ss_64x32_avx(int16_t *dest, intptr_t deststride, int16_t *src, intptr_t srcstride);
+void x265_blockcopy_ss_64x48_avx(int16_t *dest, intptr_t deststride, int16_t *src, intptr_t srcstride);
+void x265_blockcopy_ss_64x64_avx(int16_t *dest, intptr_t deststride, int16_t *src, intptr_t srcstride);
#undef BLOCKCOPY_COMMON
#undef BLOCKCOPY_SS_PP
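For anyone who wants a quick standalone sanity check before wiring this into the full testbench, something along these lines compares the 64x64 kernel against a plain scalar copy (a hypothetical harness, assuming the object assembled from blockcopy8.asm is linked in; the stride values and fill pattern are arbitrary):

    #include <cassert>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Declaration matches blockcopy8.h above; the symbol comes from the
    // assembled blockcopy8.asm.
    extern "C" void x265_blockcopy_ss_64x64_avx(int16_t *dest, intptr_t deststride,
                                                int16_t *src, intptr_t srcstride);

    int main()
    {
        const intptr_t srcStride = 80, dstStride = 72;  // int16_t units, >= 64
        std::vector<int16_t> src(srcStride * 64);
        std::vector<int16_t> refDst(dstStride * 64, 0), avxDst(dstStride * 64, 0);

        for (size_t i = 0; i < src.size(); i++)
            src[i] = (int16_t)(i * 7 + 3);              // arbitrary pattern

        // scalar reference copy of the 64x64 block
        for (int y = 0; y < 64; y++)
            for (int x = 0; x < 64; x++)
                refDst[y * dstStride + x] = src[y * srcStride + x];

        x265_blockcopy_ss_64x64_avx(avxDst.data(), dstStride, src.data(), srcStride);

        assert(!memcmp(refDst.data(), avxDst.data(), refDst.size() * sizeof(int16_t)));
        return 0;
    }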