[x265] [PATCH] asm code for blockcopy_sp, 64xN

praveen at multicorewareinc.com
Wed Nov 6 16:56:57 CET 2013


# HG changeset patch
# User Praveen Tiwari
# Date 1383753408 -19800
# Node ID 26563d0eecff19684985518c3ca578aa553000a1
# Parent  3725f4ce2d1cba50d61d1e32a14c7fdacd65c6de
asm code for blockcopy_sp, 64xN
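
The primitive being accelerated copies a block of int16_t samples into 8-bit
pixels, truncating each value. A minimal C++ sketch of the scalar behaviour
(the actual reference implementation lives in x265's pixel.cpp; the names
here are illustrative):

    #include <cstdint>

    typedef uint8_t pixel;  // 8-bit pixel build assumed

    // Copy a bx-by-by block, truncating each int16_t sample to a pixel.
    // Both strides are in elements of their respective types.
    template<int bx, int by>
    void blockcopy_sp_ref(pixel* dst, intptr_t dstStride,
                          const int16_t* src, intptr_t srcStride)
    {
        for (int y = 0; y < by; y++)
        {
            for (int x = 0; x < bx; x++)
                dst[x] = (pixel)src[x];
            dst += dstStride;
            src += srcStride;
        }
    }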

diff -r 3725f4ce2d1c -r 26563d0eecff source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Wed Nov 06 21:06:54 2013 +0530
+++ b/source/common/x86/asm-primitives.cpp	Wed Nov 06 21:26:48 2013 +0530
@@ -356,6 +356,10 @@
         p.chroma_copy_sp[CHROMA_32x32] = x265_blockcopy_sp_32x32_sse2;
         p.luma_copy_sp[LUMA_32x64] = x265_blockcopy_sp_32x64_sse2;
         p.luma_copy_sp[LUMA_48x64] = x265_blockcopy_sp_48x64_sse2;
+        p.luma_copy_sp[LUMA_64x16] = x265_blockcopy_sp_64x16_sse2;
+        p.luma_copy_sp[LUMA_64x32] = x265_blockcopy_sp_64x32_sse2;
+        p.luma_copy_sp[LUMA_64x48] = x265_blockcopy_sp_64x48_sse2;
+        p.luma_copy_sp[LUMA_64x64] = x265_blockcopy_sp_64x64_sse2;
 #if X86_64
         p.satd[LUMA_8x32] = x265_pixel_satd_8x32_sse2;
         p.satd[LUMA_16x4] = x265_pixel_satd_16x4_sse2;
diff -r 3725f4ce2d1c -r 26563d0eecff source/common/x86/blockcopy8.asm
--- a/source/common/x86/blockcopy8.asm	Wed Nov 06 21:06:54 2013 +0530
+++ b/source/common/x86/blockcopy8.asm	Wed Nov 06 21:26:48 2013 +0530
@@ -1590,3 +1590,59 @@
 %endmacro
 
 BLOCKCOPY_SP_W48_H2 48, 64
+
+;-----------------------------------------------------------------------------
+; void blockcopy_sp_%1x%2(pixel *dest, intptr_t destStride, int16_t *src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+%macro BLOCKCOPY_SP_W64_H1 2
+INIT_XMM sse2
+cglobal blockcopy_sp_%1x%2, 4, 5, 8, dest, destStride, src, srcStride
+
+mov         r4d,    %2             ; r4d = row count (block height)
+
+add         r3,     r3             ; srcStride is in int16_t units; double to get bytes
+
+mova        m0,     [tab_Vm]       ; shuffle mask: gathers the low byte of each word
+
+.loop:
+      movu       m1,     [r2]            ; load the first 7x8 int16_t of the row (m0 is pinned to the mask)
+      movu       m2,     [r2 + 16]
+      movu       m3,     [r2 + 32]
+      movu       m4,     [r2 + 48]
+      movu       m5,     [r2 + 64]
+      movu       m6,     [r2 + 80]
+      movu       m7,     [r2 + 96]
+
+      pshufb     m1,     m0              ; truncate each word to its low byte (8 pixels per register)
+      pshufb     m2,     m0
+      pshufb     m3,     m0
+      pshufb     m4,     m0
+      pshufb     m5,     m0
+      pshufb     m6,     m0
+      pshufb     m7,     m0
+
+      movh       [r0],      m1           ; store 8 packed pixels
+      movh       [r0 + 8],  m2
+      movh       [r0 + 16], m3
+      movh       [r0 + 24], m4
+      movh       [r0 + 32], m5
+      movh       [r0 + 40], m6
+      movh       [r0 + 48], m7
+
+      movu       m7,        [r2 + 112]   ; m7 already stored, so reuse it for the last 8 samples
+      pshufb     m7,        m0
+      movh       [r0 + 56], m7
+
+      lea        r0,              [r0 + r1]  ; next dest row
+      lea        r2,              [r2 + r3]  ; next src row
+
+      dec        r4d
+      jnz        .loop
+
+RET
+%endmacro
+
+BLOCKCOPY_SP_W64_H1 64, 16
+BLOCKCOPY_SP_W64_H1 64, 32
+BLOCKCOPY_SP_W64_H1 64, 48
+BLOCKCOPY_SP_W64_H1 64, 64
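
The kernel depends on a pshufb trick: tab_Vm is taken to be a byte-shuffle
mask (db 0, 2, 4, 6, 8, 10, 12, 14, ..., as used by the narrower macros
already in this file) that gathers the low byte of each of the eight words
in an XMM register into its low qword, which movh then stores as eight
pixels. A C++ sketch of the per-register effect (mask layout assumed):

    #include <cstdint>

    // What one "pshufb mN, m0" + "movh [dst], mN" pair achieves:
    // truncate eight int16_t lanes to their low bytes and store them.
    static void pack_words_to_pixels(const int16_t src[8], uint8_t dst[8])
    {
        for (int i = 0; i < 8; i++)
            dst[i] = (uint8_t)(src[i] & 0xFF);  // keep the low byte only
    }

Because m0 stays pinned to the mask, only seven XMM registers are free for
data; each 64-pixel row is therefore processed as seven load/shuffle/store
groups followed by a reload of m7 for the final eight samples.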

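Once registered, the new kernels are reached through the primitives table
rather than called directly; schematically (call site hypothetical, table
and enum names as in the asm-primitives.cpp hunk above):

    // Copy one 64x64 block of int16_t samples back to pixels through the
    // dispatch table populated by the setup code above (hypothetical use).
    primitives.luma_copy_sp[LUMA_64x64](dst, dstStride, src, srcStride);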
