[x265] [PATCH] asm code for blockcopy_sp, 32xN

praveen at multicorewareinc.com
Wed Nov 6 12:55:59 CET 2013


# HG changeset patch
# User Praveen Tiwari
# Date 1383738949 -19800
# Node ID ff734f8e18ff9d2637535d265cf1c0fa689f1ebd
# Parent  abbce0280b6814cf3ef4064503194fd0ff5aa06e
asm code for blockcopy_sp, 32xN
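
The new primitives perform a shorts-to-pixel block copy: every int16_t
source sample already holds a pixel value, and each word is narrowed to
8 bits on store. A minimal C sketch of the intended behavior follows
(illustration only, not part of the patch; blockcopy_sp_ref is a
hypothetical name):

    #include <stdint.h>

    /* Copy a width x height block of int16_t samples into an 8-bit
     * destination; each stride is in elements of its own array. */
    static void blockcopy_sp_ref(uint8_t *dest, intptr_t destStride,
                                 const int16_t *src, intptr_t srcStride,
                                 int width, int height)
    {
        for (int y = 0; y < height; y++)
        {
            for (int x = 0; x < width; x++)
                dest[x] = (uint8_t)src[x]; /* values already fit in a byte */

            dest += destStride;
            src  += srcStride;
        }
    }

The asm below unrolls this loop two rows per iteration, covering each
32-pixel row with four 16-byte loads and two packed stores.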

diff -r abbce0280b68 -r ff734f8e18ff source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Wed Nov 06 17:02:19 2013 +0530
+++ b/source/common/x86/asm-primitives.cpp	Wed Nov 06 17:25:49 2013 +0530
@@ -345,6 +345,10 @@
         p.chroma_copy_sp[CHROMA_16x16] = x265_blockcopy_sp_16x16_sse2;
         p.chroma_copy_sp[CHROMA_16x32] = x265_blockcopy_sp_16x32_sse2;
         p.chroma_copy_sp[CHROMA_24x32] = x265_blockcopy_sp_24x32_sse2;
+        p.chroma_copy_sp[CHROMA_32x8] = x265_blockcopy_sp_32x8_sse2;
+        p.chroma_copy_sp[CHROMA_32x16] = x265_blockcopy_sp_32x16_sse2;
+        p.chroma_copy_sp[CHROMA_32x24] = x265_blockcopy_sp_32x24_sse2;
+        p.chroma_copy_sp[CHROMA_32x32] = x265_blockcopy_sp_32x32_sse2;
 #if X86_64
         p.satd[LUMA_8x32] = x265_pixel_satd_8x32_sse2;
         p.satd[LUMA_16x4] = x265_pixel_satd_16x4_sse2;
diff -r abbce0280b68 -r ff734f8e18ff source/common/x86/blockcopy8.asm
--- a/source/common/x86/blockcopy8.asm	Wed Nov 06 17:02:19 2013 +0530
+++ b/source/common/x86/blockcopy8.asm	Wed Nov 06 17:25:49 2013 +0530
@@ -1253,3 +1253,55 @@
 %endmacro
 
 BLOCKCOPY_SP_W24_H2 24, 32
+
+;-----------------------------------------------------------------------------
+; void blockcopy_sp_%1x%2(pixel *dest, intptr_t destStride, int16_t *src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
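+; r0 = dest, r1 = destStride (pixels), r2 = src, r3 = srcStride (int16_t units)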
+%macro BLOCKCOPY_SP_W32_H2 2
+INIT_XMM sse2
+cglobal blockcopy_sp_%1x%2, 4, 5, 4, dest, destStride, src, srcStride
+
+mov         r4d,     %2
+
+; srcStride counts int16_t elements; double it to get a byte stride
+add         r3,      r3
+
+.loop:
+     movu       m0,      [r2]
+     movu       m1,      [r2 + 16]
+     movu       m2,      [r2 + 32]
+     movu       m3,      [r2 + 48]
+
+     ; the int16_t samples hold valid pixel values, so packuswb's
+     ; unsigned saturation is a safe word-to-byte narrowing (SSE2)
+     packuswb   m0,      m1
+     packuswb   m2,      m3
+
+     movu       [r0],         m0
+     movu       [r0 + 16],    m2
+
+     movu       m0,      [r2 + r3]
+     movu       m1,      [r2 + r3 + 16]
+     movu       m2,      [r2 + r3 + 32]
+     movu       m3,      [r2 + r3 + 48]
+
+     packuswb   m0,      m1
+     packuswb   m2,      m3
+
+     movu       [r0 + r1],        m0
+     movu       [r0 + r1 + 16],   m2
+
+     lea        r0,      [r0 + 2 * r1]
+     lea        r2,      [r2 + 2 * r3]
+
+     sub        r4d,     2
+     jnz        .loop
+
+RET
+%endmacro
+
+BLOCKCOPY_SP_W32_H2 32,  8
+BLOCKCOPY_SP_W32_H2 32, 16
+BLOCKCOPY_SP_W32_H2 32, 24
+BLOCKCOPY_SP_W32_H2 32, 32

