[x265] [PATCH] asm code for blockcopy_sp, 8xN blocks

praveen at multicorewareinc.com
Wed Nov 6 07:34:47 CET 2013


# HG changeset patch
# User Praveen Tiwari
# Date 1383719675 -19800
# Node ID 49db88288b33314b7157793c3a71ef613bfe5f21
# Parent  7cdcf1a03d93f8f007d9d111129d29ab31513310
asm code for blockcopy_sp, 8xN blocks
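
For context: blockcopy_sp copies a block of int16_t values (pixel data held in shorts) into a uint8_t pixel buffer, keeping the low byte of each word. A minimal C++ sketch of that behavior follows; the name blockcopy_sp_ref and the assumption that source values already fit in a byte are illustrative, not quoted from the x265 tree.

    #include <cstdint>

    typedef uint8_t pixel;

    // Reference behavior for blockcopy_sp: copy int16_t values (assumed
    // already in pixel range) into a pixel buffer. Strides are in
    // elements of the respective types.
    static void blockcopy_sp_ref(pixel *dest, intptr_t destStride,
                                 const int16_t *src, intptr_t srcStride,
                                 int width, int height)
    {
        for (int y = 0; y < height; y++)
        {
            for (int x = 0; x < width; x++)
                dest[x] = (pixel)src[x];   // keep the low byte of each word

            dest += destStride;
            src  += srcStride;
        }
    }

The assembly below vectorizes this copy for width 8, covering the heights used by the chroma partitions (8x2, 8x4, 8x6, 8x8, 8x16).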

diff -r 7cdcf1a03d93 -r 49db88288b33 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Tue Nov 05 14:53:10 2013 -0600
+++ b/source/common/x86/asm-primitives.cpp	Wed Nov 06 12:04:35 2013 +0530
@@ -326,6 +326,15 @@
 
         CHROMA_BLOCKCOPY(_sse2);
         LUMA_BLOCKCOPY(_sse2);
+
+// This function pointer initialization is temporary and will be removed later, once the macro definitions cover all partitions.
+// It avoids linker errors until all partitions are coded, letting us commit smaller patches that are easier to review.
+
+        p.chroma_copy_sp[CHROMA_8x2] = x265_blockcopy_sp_8x2_ssse3;
+        p.chroma_copy_sp[CHROMA_8x4] = x265_blockcopy_sp_8x4_ssse3;
+        p.chroma_copy_sp[CHROMA_8x6] = x265_blockcopy_sp_8x6_ssse3;
+        p.chroma_copy_sp[CHROMA_8x8] = x265_blockcopy_sp_8x8_ssse3;
+        p.chroma_copy_sp[CHROMA_8x16] = x265_blockcopy_sp_8x16_ssse3;
 #if X86_64
         p.satd[LUMA_8x32] = x265_pixel_satd_8x32_sse2;
         p.satd[LUMA_16x4] = x265_pixel_satd_16x4_sse2;
diff -r 7cdcf1a03d93 -r 49db88288b33 source/common/x86/blockcopy8.asm
--- a/source/common/x86/blockcopy8.asm	Tue Nov 05 14:53:10 2013 -0600
+++ b/source/common/x86/blockcopy8.asm	Wed Nov 06 12:04:35 2013 +0530
@@ -27,6 +27,8 @@
 
 SECTION_RODATA 32
 
+tab_Vm:    db 0, 2, 4, 6, 8, 10, 12, 14, 0, 0, 0, 0, 0, 0, 0, 0    ; pshufb mask: low byte of each of the eight words
+
 SECTION .text
 
 ;-----------------------------------------------------------------------------
@@ -796,3 +798,195 @@
 BLOCKCOPY_PP_W64_H2 64, 32
 BLOCKCOPY_PP_W64_H2 64, 48
 BLOCKCOPY_PP_W64_H2 64, 64
+
+;-----------------------------------------------------------------------------
+; void blockcopy_sp_8x2(pixel *dest, intptr_t destStride, int16_t *src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+INIT_XMM ssse3
+cglobal blockcopy_sp_8x2, 4, 4, 3, dest, destStride, src, srcStride
+
+add        r3,        r3       ; srcStride is in int16_t units; convert to bytes
+
+mova       m0,        [tab_Vm] ; shuffle mask
+
+movu       m1,        [r2]     ; row 0 (8 words)
+movu       m2,        [r2 + r3]; row 1
+
+pshufb     m1,        m0       ; pack the low byte of each word
+pshufb     m2,        m0
+
+movh       [r0],      m1       ; store 8 bytes per row
+movh       [r0 + r1], m2
+
+RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_sp_8x4(pixel *dest, intptr_t destStride, int16_t *src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+INIT_XMM ssse3
+cglobal blockcopy_sp_8x4, 4, 5, 5, dest, destStride, src, srcStride
+
+add        r3,     r3
+
+mova       m0,     [tab_Vm]
+
+movu       m1,     [r2]
+movu       m2,     [r2 + r3]
+movu       m3,     [r2 + 2 * r3]
+lea        r4,     [r2 + 2 * r3]
+movu       m4,     [r4 + r3]
+
+pshufb     m1,     m0
+pshufb     m2,     m0
+pshufb     m3,     m0
+pshufb     m4,     m0
+
+movh       [r0],          m1
+movh       [r0 + r1],     m2
+movh       [r0 + 2 * r1], m3
+lea        r4,            [r0 + 2 * r1]
+movh       [r4 + r1],     m4
+
+RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_sp_8x6(pixel *dest, intptr_t destStride, int16_t *src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+INIT_XMM ssse3
+cglobal blockcopy_sp_8x6, 4, 5, 7, dest, destStride, src, srcStride
+
+add        r3,      r3
+
+mova       m0,      [tab_Vm]
+
+movu       m1,      [r2]
+movu       m2,      [r2 + r3]
+movu       m3,      [r2 + 2 * r3]
+lea        r4,      [r2 + 2 * r3]
+movu       m4,      [r4 + r3]
+movu       m5,      [r4 + 2 * r3]
+lea        r4,      [r4 + 2 * r3]
+movu       m6,      [r4 + r3]
+
+pshufb     m1,      m0
+pshufb     m2,      m0
+pshufb     m3,      m0
+pshufb     m4,      m0
+pshufb     m5,      m0
+pshufb     m6,      m0
+
+movh       [r0],            m1
+movh       [r0 + r1],       m2
+movh       [r0 + 2 * r1],   m3
+lea        r4,              [r0 + 2 * r1]
+movh       [r4 + r1],       m4
+movh       [r4 + 2 * r1],   m5
+lea        r4,              [r4 + 2 * r1]
+movh       [r4 + r1],       m6
+
+RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_sp_8x8(pixel *dest, intptr_t destStride, int16_t *src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+INIT_XMM ssse3
+cglobal blockcopy_sp_8x8, 4, 6, 8, dest, destStride, src, srcStride
+
+add        r3,      r3
+
+mova       m0,      [tab_Vm]
+
+movu       m1,      [r2]
+movu       m2,      [r2 + r3]
+movu       m3,      [r2 + 2 * r3]
+lea        r4,      [r2 + 2 * r3]
+movu       m4,      [r4 + r3]
+movu       m5,      [r4 + 2 * r3]
+lea        r4,      [r4 + 2 * r3]
+movu       m6,      [r4 + r3]
+movu       m7,      [r4 + 2 * r3]
+lea        r5,      [r4 + 2 * r3]
+
+pshufb     m1,      m0
+pshufb     m2,      m0
+pshufb     m3,      m0
+pshufb     m4,      m0
+pshufb     m5,      m0
+pshufb     m6,      m0
+pshufb     m7,      m0
+
+movh       [r0],            m1
+movh       [r0 + r1],       m2
+movh       [r0 + 2 * r1],   m3
+lea        r4,              [r0 + 2 * r1]
+movh       [r4 + r1],       m4
+movh       [r4 + 2 * r1],   m5
+lea        r4,              [r4 + 2 * r1]
+movh       [r4 + r1],       m6
+movh       [r4 + 2 * r1],   m7
+
+movu       m1,              [r5 + r3]
+pshufb     m1,              m0
+lea        r4,              [r4 + 2 * r1]
+movh       [r4 + r1],       m1
+
+RET
+
+;-----------------------------------------------------------------------------
+; void blockcopy_sp_%1x%2(pixel *dest, intptr_t destStride, int16_t *src, intptr_t srcStride)
+;-----------------------------------------------------------------------------
+%macro BLOCKCOPY_SP_W8_H8 2
+INIT_XMM ssse3
+cglobal blockcopy_sp_%1x%2, 4, 7, 8, dest, destStride, src, srcStride
+
+mov         r6d,    %2
+
+add         r3,     r3
+
+mova        m0,     [tab_Vm]
+
+.loop:
+      movu       m1,     [r2]
+      movu       m2,     [r2 + r3]
+      movu       m3,     [r2 + 2 * r3]
+      lea        r4,     [r2 + 2 * r3]
+      movu       m4,     [r4 + r3]
+      movu       m5,     [r4 + 2 * r3]
+      lea        r4,     [r4 + 2 * r3]
+      movu       m6,     [r4 + r3]
+      movu       m7,     [r4 + 2 * r3]
+      lea        r5,     [r4 + 2 * r3]
+
+      pshufb     m1,     m0
+      pshufb     m2,     m0
+      pshufb     m3,     m0
+      pshufb     m4,     m0
+      pshufb     m5,     m0
+      pshufb     m6,     m0
+      pshufb     m7,     m0
+
+      movh       [r0],            m1
+      movh       [r0 + r1],       m2
+      movh       [r0 + 2 * r1],   m3
+      lea        r4,              [r0 + 2 * r1]
+      movh       [r4 + r1],       m4
+      movh       [r4 + 2 * r1],   m5
+      lea        r4,              [r4 + 2 * r1]
+      movh       [r4 + r1],       m6
+      movh       [r4 + 2 * r1],   m7
+
+      movu       m1,              [r5 + r3]
+      pshufb     m1,              m0
+      lea        r4,              [r4 + 2 * r1]
+      movh       [r4 + r1],       m1
+
+      lea        r0,              [r0 + 8 * r1]
+      lea        r2,              [r2 + 8 * r3]
+
+      sub        r6d,             8
+      jnz        .loop
+
+RET
+%endmacro
+
+BLOCKCOPY_SP_W8_H8 8, 16
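
A note on the shuffle trick: tab_Vm is a pshufb control mask that gathers the low byte of each of the eight source words into the low half of the register; movh then stores exactly those 8 bytes, so the upper half of the shuffled register is irrelevant. A rough single-row equivalent in C++ intrinsics, for illustration only (pshufb is the SSSE3 instruction behind _mm_shuffle_epi8; copy_row_8_sp is an illustrative name, not an x265 primitive):

    #include <cstdint>
    #include <tmmintrin.h>  // SSSE3: _mm_shuffle_epi8

    // Mirrors the movu/pshufb/movh sequence for one 8-pixel row.
    static void copy_row_8_sp(uint8_t *dest, const int16_t *src)
    {
        // Same byte pattern as tab_Vm: select the low byte of each word.
        const __m128i mask = _mm_setr_epi8(0, 2, 4, 6, 8, 10, 12, 14,
                                           0, 0, 0, 0, 0, 0, 0, 0);
        __m128i row = _mm_loadu_si128((const __m128i *)src); // movu
        row = _mm_shuffle_epi8(row, mask);                   // pshufb
        _mm_storel_epi64((__m128i *)dest, row);              // movh
    }

Since blockcopy_sp sources are already within pixel range, packuswb (plain SSE2) would produce the same bytes and can pack two rows per instruction; the pshufb form used here is one valid choice, not the only one.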

