[x265] [PATCH] asm code for blockcopy_sp, 16xN blocks
praveen at multicorewareinc.com
Wed Nov 6 11:31:52 CET 2013
# HG changeset patch
# User Praveen Tiwari
# Date 1383733901 -19800
# Node ID 7486c5c64cc0666deab456aea12a2e1710873680
# Parent 6f9534cb74935fb1593a84a96b495f3ea30ccec4
asm code for blockcopy_sp, 16xN blocks
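For context, blockcopy_sp is the "short to pixel" block copy: each 16-bit
coefficient is narrowed to one pixel. A minimal scalar sketch of that
contract, assuming 8-bit pixels (pixel == uint8_t) and an illustrative
width/height parameterization (this is not the x265 C reference itself):

    #include <stdint.h>

    typedef uint8_t pixel;

    // Copy a width x height block of int16_t samples into a pixel
    // buffer, truncating each sample to its low byte. Strides are in
    // elements of the respective type, matching the asm below (which
    // doubles srcStride to get a byte stride).
    static void blockcopy_sp_ref(pixel *dest, intptr_t destStride,
                                 const int16_t *src, intptr_t srcStride,
                                 int width, int height)
    {
        for (int y = 0; y < height; y++)
        {
            for (int x = 0; x < width; x++)
                dest[x] = (pixel)src[x];

            dest += destStride;
            src += srcStride;
        }
    }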
diff -r 6f9534cb7493 -r 7486c5c64cc0 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp Wed Nov 06 13:06:15 2013 +0530
+++ b/source/common/x86/asm-primitives.cpp Wed Nov 06 16:01:41 2013 +0530
@@ -339,6 +339,11 @@
p.chroma_copy_sp[CHROMA_8x6] = x265_blockcopy_sp_8x6_sse2;
p.chroma_copy_sp[CHROMA_8x8] = x265_blockcopy_sp_8x8_sse2;
p.chroma_copy_sp[CHROMA_8x16] = x265_blockcopy_sp_8x16_sse2;
+ p.chroma_copy_sp[CHROMA_16x4] = x265_blockcopy_sp_16x4_sse2;
+ p.chroma_copy_sp[CHROMA_16x8] = x265_blockcopy_sp_16x8_sse2;
+ p.chroma_copy_sp[CHROMA_16x12] = x265_blockcopy_sp_16x12_sse2;
+ p.chroma_copy_sp[CHROMA_16x16] = x265_blockcopy_sp_16x16_sse2;
+ p.chroma_copy_sp[CHROMA_16x32] = x265_blockcopy_sp_16x32_sse2;
#if X86_64
p.satd[LUMA_8x32] = x265_pixel_satd_8x32_sse2;
p.satd[LUMA_16x4] = x265_pixel_satd_16x4_sse2;
diff -r 6f9534cb7493 -r 7486c5c64cc0 source/common/x86/blockcopy8.asm
--- a/source/common/x86/blockcopy8.asm Wed Nov 06 13:06:15 2013 +0530
+++ b/source/common/x86/blockcopy8.asm Wed Nov 06 16:01:41 2013 +0530
@@ -1146,3 +1146,64 @@
%endmacro
BLOCKCOPY_SP_W8_H8 8, 16
+
+;-----------------------------------------------------------------------------
+; void blockcopy_sp_%1x%2(pixel *dest, intptr_t destStride, int16_t *src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+%macro BLOCKCOPY_SP_W16_H4 2
+INIT_XMM sse2
+cglobal blockcopy_sp_%1x%2, 4, 7, 7, dest, destStride, src, srcStride
+
+mov r6d, %2 ; r6d = remaining rows (block height)
+
+add r3, r3 ; src is int16_t: convert element stride to byte stride
+
+mova m0, [tab_Vm] ; shuffle mask that keeps the low byte of each word
+
+.loop:
+    movu m1, [r2] ; rows 0-2: two 8-word loads per 16-sample row
+ movu m2, [r2 + 16]
+ movu m3, [r2 + r3]
+ movu m4, [r2 + r3 + 16]
+ movu m5, [r2 + 2 * r3]
+ movu m6, [r2 + 2 * r3 + 16]
+
+    pshufb m1, m0 ; pack: low byte of each word into the low qword
+ pshufb m2, m0
+ pshufb m3, m0
+ pshufb m4, m0
+ pshufb m5, m0
+ pshufb m6, m0
+
+    movh [r0], m1 ; store 8 packed pixels at a time
+ movh [r0 + 8], m2
+ movh [r0 + r1], m3
+ movh [r0 + r1 + 8], m4
+ movh [r0 + 2 * r1], m5
+ movh [r0 + 2 * r1 + 8], m6
+
+    lea r4, [r2 + 2 * r3] ; row 3: base pointer src + 2 rows
+ movu m1, [r4 + r3]
+ movu m2, [r4 + r3 + 16]
+
+ pshufb m1, m0
+ pshufb m2, m0
+
+    lea r5, [r0 + 2 * r1] ; dest + 2 rows, for the row 3 store
+ movh [r5 + r1], m1
+ movh [r5 + r1 + 8], m2
+
+    lea r0, [r5 + 2 * r1] ; advance dest by four rows
+    lea r2, [r4 + 2 * r3] ; advance src by four rows
+
+    sub r6d, 4 ; four rows per iteration
+    jnz .loop
+
+RET
+%endmacro
+
+BLOCKCOPY_SP_W16_H4 16, 4
+BLOCKCOPY_SP_W16_H4 16, 8
+BLOCKCOPY_SP_W16_H4 16, 12
+BLOCKCOPY_SP_W16_H4 16, 16
+BLOCKCOPY_SP_W16_H4 16, 32
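For readers who prefer intrinsics, the packing step above has a direct
C++ equivalent. This is a sketch of one 16-sample row only, not part of
the patch; the shuffle constant mirrors what tab_Vm is assumed to
contain (indices 0, 2, ..., 14 select the low byte of each word), and
pshufb maps to _mm_shuffle_epi8 from tmmintrin.h (SSSE3):

    #include <stdint.h>
    #include <tmmintrin.h> // _mm_shuffle_epi8 (pshufb, SSSE3)

    typedef uint8_t pixel;

    // One 16-sample row: two 8-word loads, a byte shuffle that keeps
    // the low byte of each word, then two 8-byte stores -- the same
    // movu / pshufb / movh sequence as the asm loop.
    static void copy_row_16_sp(pixel *dest, const int16_t *src)
    {
        const __m128i mask = _mm_setr_epi8(0, 2, 4, 6, 8, 10, 12, 14,
                                           -1, -1, -1, -1, -1, -1, -1, -1);

        __m128i lo = _mm_loadu_si128((const __m128i *)src);       // movu m1, [r2]
        __m128i hi = _mm_loadu_si128((const __m128i *)(src + 8)); // movu m2, [r2 + 16]

        lo = _mm_shuffle_epi8(lo, mask); // pshufb m1, m0
        hi = _mm_shuffle_epi8(hi, mask); // pshufb m2, m0

        _mm_storel_epi64((__m128i *)dest, lo);       // movh [r0], m1
        _mm_storel_epi64((__m128i *)(dest + 8), hi); // movh [r0 + 8], m2
    }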