[x265] [PATCH] add avx version for chroma_copy_ss 16x4, 16x8, 16x12, 16x16, 16x24, 16x32, 16x64 based on csp, approx 1.5x-2x speedup over SSE
sagar at multicorewareinc.com
Tue Sep 23 09:22:19 CEST 2014
# HG changeset patch
# User Sagar Kotecha
# Date 1411456622 -19800
# Tue Sep 23 12:47:02 2014 +0530
# Node ID 166ed7f88b9205eb585f88c7ab43ab02b29ace69
# Parent ee76b64fd051b529cc57c4fae7d8b7e0b6f8463e
add avx version for chroma_copy_ss 16x4, 16x8, 16x12, 16x16, 16x24, 16x32, 16x64 based on csp, approx 1.5x-2x speedup over SSE
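
For reviewers who want the reference semantics: copy_ss copies a block of
int16_t residual data, with both strides counted in elements. A minimal
scalar sketch of what each AVX routine below must compute (illustrative
only, not part of the patch; x265's C fallback is the blockcopy_ss_c
template in common/pixel.cpp):

    #include <cstdint>

    // Scalar reference for blockcopy_ss: copy a bx-by-by block of
    // int16_t values; strides are in elements, not bytes.
    static void blockcopy_ss_c(int16_t *dest, intptr_t deststride,
                               const int16_t *src, intptr_t srcstride,
                               int bx, int by)
    {
        for (int y = 0; y < by; y++)
        {
            for (int x = 0; x < bx; x++)
                dest[x] = src[x];
            dest += deststride;
            src += srcstride;
        }
    }
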
diff -r ee76b64fd051 -r 166ed7f88b92 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp Mon Sep 22 21:28:59 2014 +0900
+++ b/source/common/x86/asm-primitives.cpp Tue Sep 23 12:47:02 2014 +0530
@@ -1744,6 +1744,17 @@
p.quant = x265_quant_avx2;
p.nquant = x265_nquant_avx2;
p.dequant_normal = x265_dequant_normal_avx2;
+ p.chroma[X265_CSP_I420].copy_ss[CHROMA_16x4] = x265_blockcopy_ss_16x4_avx;
+ p.chroma[X265_CSP_I420].copy_ss[CHROMA_16x8] = x265_blockcopy_ss_16x8_avx;
+ p.chroma[X265_CSP_I420].copy_ss[CHROMA_16x12] = x265_blockcopy_ss_16x12_avx;
+ p.chroma[X265_CSP_I420].copy_ss[CHROMA_16x16] = x265_blockcopy_ss_16x16_avx;
+ p.chroma[X265_CSP_I420].copy_ss[CHROMA_16x32] = x265_blockcopy_ss_16x32_avx;
+ p.chroma[X265_CSP_I422].copy_ss[CHROMA422_16x8] = x265_blockcopy_ss_16x8_avx;
+ p.chroma[X265_CSP_I422].copy_ss[CHROMA422_16x16] = x265_blockcopy_ss_16x16_avx;
+ p.chroma[X265_CSP_I422].copy_ss[CHROMA422_16x24] = x265_blockcopy_ss_16x24_avx;
+ p.chroma[X265_CSP_I422].copy_ss[CHROMA422_16x32] = x265_blockcopy_ss_16x32_avx;
+ p.chroma[X265_CSP_I422].copy_ss[CHROMA422_16x64] = x265_blockcopy_ss_16x64_avx;
+
#if X86_64
p.dct[DCT_16x16] = x265_dct16_avx2;
p.dct[DCT_32x32] = x265_dct32_avx2;
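
The assignments above only populate the dispatch table; call sites go
through the function pointers, so they pick up the AVX routines
automatically on capable CPUs. A self-contained sketch of the pattern
(names simplified from x265's EncoderPrimitives; illustrative only):

    #include <cstdint>

    // copy_ss_t matches the prototypes declared in blockcopy8.h.
    typedef void (*copy_ss_t)(int16_t *dest, intptr_t deststride,
                              int16_t *src, intptr_t srcstride);

    struct ChromaPrimitives { copy_ss_t copy_ss[32]; }; // indexed by CHROMA_* part enums
    ChromaPrimitives chroma[4];                         // indexed by X265_CSP_*

    // Every caller dispatches through the table, so swapping SSE2 for
    // AVX in the setup code is invisible to the rest of the encoder.
    void copyResidual(int csp, int part, int16_t *dst, intptr_t dstStride,
                      int16_t *src, intptr_t srcStride)
    {
        chroma[csp].copy_ss[part](dst, dstStride, src, srcStride);
    }
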
diff -r ee76b64fd051 -r 166ed7f88b92 source/common/x86/blockcopy8.asm
--- a/source/common/x86/blockcopy8.asm Mon Sep 22 21:28:59 2014 +0900
+++ b/source/common/x86/blockcopy8.asm Tue Sep 23 12:47:02 2014 +0530
@@ -2906,6 +2906,43 @@
;-----------------------------------------------------------------------------
; void blockcopy_ss_%1x%2(int16_t *dest, intptr_t deststride, int16_t *src, intptr_t srcstride)
;-----------------------------------------------------------------------------
+%macro BLOCKCOPY_SS_W16_H4_avx 2
+INIT_YMM avx
+cglobal blockcopy_ss_%1x%2, 4, 7, 4
+ mov r4d, %2/4 ; loop count: four rows per iteration
+ add r1, r1 ; dest stride: int16_t elements -> bytes
+ add r3, r3 ; src stride: int16_t elements -> bytes
+ lea r5, [3 * r3] ; 3 * src stride (fourth-row offset)
+ lea r6, [3 * r1] ; 3 * dest stride (fourth-row offset)
+.loop:
+ movu m0, [r2]
+ movu m1, [r2 + r3]
+ movu m2, [r2 + 2 * r3]
+ movu m3, [r2 + r5]
+
+ movu [r0], m0
+ movu [r0 + r1], m1
+ movu [r0 + 2 * r1], m2
+ movu [r0 + r6], m3
+
+ lea r0, [r0 + 4 * r1]
+ lea r2, [r2 + 4 * r3]
+ dec r4d
+ jnz .loop
+ RET
+%endmacro
+
+BLOCKCOPY_SS_W16_H4_avx 16, 4
+BLOCKCOPY_SS_W16_H4_avx 16, 8
+BLOCKCOPY_SS_W16_H4_avx 16, 12
+BLOCKCOPY_SS_W16_H4_avx 16, 16
+BLOCKCOPY_SS_W16_H4_avx 16, 24
+BLOCKCOPY_SS_W16_H4_avx 16, 32
+BLOCKCOPY_SS_W16_H4_avx 16, 64
+
+;-----------------------------------------------------------------------------
+; void blockcopy_ss_%1x%2(int16_t *dest, intptr_t deststride, int16_t *src, intptr_t srcstride)
+;-----------------------------------------------------------------------------
%macro BLOCKCOPY_SS_W16_H8 2
INIT_XMM sse2
cglobal blockcopy_ss_%1x%2, 4, 5, 4
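
For comparison, the structure of the new macro in C: one unaligned YMM
load/store moves a whole 16 x int16_t row (32 bytes), and each loop
iteration handles four rows, which is why the prologue doubles the
element strides into byte strides and precomputes 3*stride into r5/r6.
A hedged C mirror of that loop shape (illustrative, not the patch's
actual code path):

    #include <cstdint>
    #include <cstring>

    static void blockcopy_ss_16xN_4rows(int16_t *dest, intptr_t deststride,
                                        const int16_t *src, intptr_t srcstride,
                                        int rows)
    {
        for (int i = 0; i < rows / 4; i++)    // %2/4 iterations, as in the asm
        {
            for (int r = 0; r < 4; r++)       // rows 0..3: [r2], [r2+r3], [r2+2*r3], [r2+r5]
                memcpy(dest + r * deststride, src + r * srcstride,
                       16 * sizeof(int16_t)); // 32 bytes = one YMM register
            dest += 4 * deststride;
            src += 4 * srcstride;
        }
    }
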
diff -r ee76b64fd051 -r 166ed7f88b92 source/common/x86/blockcopy8.h
--- a/source/common/x86/blockcopy8.h Mon Sep 22 21:28:59 2014 +0900
+++ b/source/common/x86/blockcopy8.h Tue Sep 23 12:47:02 2014 +0530
@@ -182,6 +182,13 @@
void x265_blockfill_s_8x8_sse2(int16_t *dst, intptr_t dstride, int16_t val);
void x265_blockfill_s_16x16_sse2(int16_t *dst, intptr_t dstride, int16_t val);
void x265_blockfill_s_32x32_sse2(int16_t *dst, intptr_t dstride, int16_t val);
+void x265_blockcopy_ss_16x4_avx(int16_t *dest, intptr_t deststride, int16_t *src, intptr_t srcstride);
+void x265_blockcopy_ss_16x8_avx(int16_t *dest, intptr_t deststride, int16_t *src, intptr_t srcstride);
+void x265_blockcopy_ss_16x12_avx(int16_t *dest, intptr_t deststride, int16_t *src, intptr_t srcstride);
+void x265_blockcopy_ss_16x16_avx(int16_t *dest, intptr_t deststride, int16_t *src, intptr_t srcstride);
+void x265_blockcopy_ss_16x24_avx(int16_t *dest, intptr_t deststride, int16_t *src, intptr_t srcstride);
+void x265_blockcopy_ss_16x32_avx(int16_t *dest, intptr_t deststride, int16_t *src, intptr_t srcstride);
+void x265_blockcopy_ss_16x64_avx(int16_t *dest, intptr_t deststride, int16_t *src, intptr_t srcstride);
#undef BLOCKCOPY_COMMON
#undef BLOCKCOPY_SS_PP