[x265] [PATCH] asm: 10bpp code for blockcopy_pp_64xN

murugan at multicorewareinc.com murugan at multicorewareinc.com
Tue Dec 10 11:11:15 CET 2013


# HG changeset patch
# User Murugan Vairavel <murugan at multicorewareinc.com>
# Date 1386670150 -19800
#      Tue Dec 10 15:39:10 2013 +0530
# Node ID 349641856c90412e395b8d6c607235ae8587a4dc
# Parent  674a87448095d5f907454ed3fe2e1b1fe411a480
asm: 10bpp code for blockcopy_pp_64xN
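
For reference, this SSE2 routine stands in for the plain C block copy: a 64xN
copy is just N row copies from src to dest. A minimal scalar sketch of that
behaviour follows (the template name and exact signature are illustrative, not
the actual x265 C primitive). The key 10bpp detail is that pixel is 16 bits
wide in HIGH_BIT_DEPTH builds, so a 64-pixel row is 128 bytes; that is why the
new code doubles r1/r3 up front and issues eight 16-byte loads/stores per row
instead of four.

    #include <cstdint>

    #if HIGH_BIT_DEPTH            // x265 build option: 10bpp pipeline
    typedef uint16_t pixel;       // 64 pixels -> 128 bytes per row
    #else
    typedef uint8_t pixel;        // 64 pixels -> 64 bytes per row
    #endif

    // Illustrative scalar equivalent of blockcopy_pp_64xN (strides in pixels).
    template<int bw, int bh>
    void blockcopy_pp_ref(pixel* dest, intptr_t destStride,
                          const pixel* src, intptr_t srcStride)
    {
        for (int y = 0; y < bh; y++)
        {
            for (int x = 0; x < bw; x++)
                dest[x] = src[x];

            dest += destStride;
            src += srcStride;
        }
    }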

diff -r 674a87448095 -r 349641856c90 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Tue Dec 10 14:00:04 2013 +0530
+++ b/source/common/x86/asm-primitives.cpp	Tue Dec 10 15:39:10 2013 +0530
@@ -691,6 +691,10 @@
         p.chroma[X265_CSP_I420].copy_pp[LUMA_64x64] = x265_blockcopy_pp_32x32_sse2;
         p.luma_copy_pp[LUMA_32x64] = x265_blockcopy_pp_32x64_sse2;
         p.luma_copy_pp[LUMA_48x64] = x265_blockcopy_pp_48x64_sse2;
+        p.luma_copy_pp[LUMA_64x16] = x265_blockcopy_pp_64x16_sse2;
+        p.luma_copy_pp[LUMA_64x32] = x265_blockcopy_pp_64x32_sse2;
+        p.luma_copy_pp[LUMA_64x48] = x265_blockcopy_pp_64x48_sse2;
+        p.luma_copy_pp[LUMA_64x64] = x265_blockcopy_pp_64x64_sse2;
     }
     if (cpuMask & X265_CPU_SSSE3)
     {
diff -r 674a87448095 -r 349641856c90 source/common/x86/blockcopy8.asm
--- a/source/common/x86/blockcopy8.asm	Tue Dec 10 14:00:04 2013 +0530
+++ b/source/common/x86/blockcopy8.asm	Tue Dec 10 15:39:10 2013 +0530
@@ -1097,46 +1097,150 @@
 ;-----------------------------------------------------------------------------
 ; void blockcopy_pp_%1x%2(pixel *dest, intptr_t deststride, pixel *src, intptr_t srcstride)
 ;-----------------------------------------------------------------------------
-%macro BLOCKCOPY_PP_W64_H2 2
+%macro BLOCKCOPY_PP_W64_H4 2
 INIT_XMM sse2
-cglobal blockcopy_pp_%1x%2, 4, 5, 8, dest, deststride, src, srcstride
-
-mov         r4d,       %2
-
+cglobal blockcopy_pp_%1x%2, 4, 5, 6, dest, deststride, src, srcstride
+    mov    r4d,    %2/4
+%if HIGH_BIT_DEPTH
+    add     r1,    r1
+    add     r3,    r3
 .loop
-     movu     m0,     [r2]
-     movu     m1,     [r2 + 16]
-     movu     m2,     [r2 + 32]
-     movu     m3,     [r2 + 48]
-
-     movu     m4,     [r2 + r3]
-     movu     m5,     [r2 + r3 + 16]
-     movu     m6,     [r2 + r3 + 32]
-     movu     m7,     [r2 + r3 + 48]
-
-     movu     [r0],                 m0
-     movu     [r0 + 16],            m1
-     movu     [r0 + 32],            m2
-     movu     [r0 + 48],            m3
-
-     movu     [r0 + r1],            m4
-     movu     [r0 + r1 + 16],       m5
-     movu     [r0 + r1 + 32],       m6
-     movu     [r0 + r1 + 48],       m7
-
-     lea      r0,                   [r0 + 2 * r1]
-     lea      r2,                   [r2 + 2 * r3]
-
-     sub      r4d,                  2
-     jnz      .loop
-
-RET
+    movu    m0,    [r2]
+    movu    m1,    [r2 + 16]
+    movu    m2,    [r2 + 32]
+    movu    m3,    [r2 + 48]
+    movu    m4,    [r2 + 64]
+    movu    m5,    [r2 + 80]
+
+    movu    [r0],         m0
+    movu    [r0 + 16],    m1
+    movu    [r0 + 32],    m2
+    movu    [r0 + 48],    m3
+    movu    [r0 + 64],    m4
+    movu    [r0 + 80],    m5
+
+    movu    m0,    [r2 + 96]
+    movu    m1,    [r2 + 112]
+    movu    m2,    [r2 + r3]
+    movu    m3,    [r2 + r3 + 16]
+    movu    m4,    [r2 + r3 + 32]
+    movu    m5,    [r2 + r3 + 48]
+
+    movu    [r0 + 96],         m0
+    movu    [r0 + 112],        m1
+    movu    [r0 + r1],         m2
+    movu    [r0 + r1 + 16],    m3
+    movu    [r0 + r1 + 32],    m4
+    movu    [r0 + r1 + 48],    m5
+
+    movu    m0,    [r2 + r3 + 64]
+    movu    m1,    [r2 + r3 + 80]
+    movu    m2,    [r2 + r3 + 96]
+    movu    m3,    [r2 + r3 + 112]
+    lea     r2,    [r2 + 2 * r3]
+
+    movu    [r0 + r1 + 64],     m0
+    movu    [r0 + r1 + 80],     m1
+    movu    [r0 + r1 + 96],     m2
+    movu    [r0 + r1 + 112],    m3
+
+    lea     r0,    [r0 + 2 * r1]
+    movu    m0,    [r2]
+    movu    m1,    [r2 + 16]
+    movu    m2,    [r2 + 32]
+    movu    m3,    [r2 + 48]
+    movu    m4,    [r2 + 64]
+    movu    m5,    [r2 + 80]
+
+    movu    [r0],         m0
+    movu    [r0 + 16],    m1
+    movu    [r0 + 32],    m2
+    movu    [r0 + 48],    m3
+    movu    [r0 + 64],    m4
+    movu    [r0 + 80],    m5
+
+    movu    m0,    [r2 + 96]
+    movu    m1,    [r2 + 112]
+    movu    m2,    [r2 + r3]
+    movu    m3,    [r2 + r3 + 16]
+    movu    m4,    [r2 + r3 + 32]
+    movu    m5,    [r2 + r3 + 48]
+
+    movu    [r0 + 96],         m0
+    movu    [r0 + 112],        m1
+    movu    [r0 + r1],         m2
+    movu    [r0 + r1 + 16],    m3
+    movu    [r0 + r1 + 32],    m4
+    movu    [r0 + r1 + 48],    m5
+
+    movu    m0,    [r2 + r3 + 64]
+    movu    m1,    [r2 + r3 + 80]
+    movu    m2,    [r2 + r3 + 96]
+    movu    m3,    [r2 + r3 + 112]
+
+    movu    [r0 + r1 + 64],     m0
+    movu    [r0 + r1 + 80],     m1
+    movu    [r0 + r1 + 96],     m2
+    movu    [r0 + r1 + 112],    m3
+
+    dec     r4d
+    lea     r0,    [r0 + 2 * r1]
+    lea     r2,    [r2 + 2 * r3]
+    jnz     .loop
+%else
+.loop
+    movu    m0,    [r2]
+    movu    m1,    [r2 + 16]
+    movu    m2,    [r2 + 32]
+    movu    m3,    [r2 + 48]
+    movu    m4,    [r2 + r3]
+    movu    m5,    [r2 + r3 + 16]
+
+    movu    [r0],              m0
+    movu    [r0 + 16],         m1
+    movu    [r0 + 32],         m2
+    movu    [r0 + 48],         m3
+    movu    [r0 + r1],         m4
+    movu    [r0 + r1 + 16],    m5
+
+    movu    m0,    [r2 + r3 + 32]
+    movu    m1,    [r2 + r3 + 48]
+    lea     r2,    [r2 + 2 * r3]
+    movu    m2,    [r2]
+    movu    m3,    [r2 + 16]
+    movu    m4,    [r2 + 32]
+    movu    m5,    [r2 + 48]
+
+    movu    [r0 + r1 + 32],    m0
+    movu    [r0 + r1 + 48],    m1
+    lea     r0,                [r0 + 2 * r1]
+    movu    [r0],              m2
+    movu    [r0 + 16],         m3
+    movu    [r0 + 32],         m4
+    movu    [r0 + 48],         m5
+
+    movu    m0,    [r2 + r3]
+    movu    m1,    [r2 + r3 + 16]
+    movu    m2,    [r2 + r3 + 32]
+    movu    m3,    [r2 + r3 + 48]
+
+    movu    [r0 + r1],         m0
+    movu    [r0 + r1 + 16],    m1
+    movu    [r0 + r1 + 32],    m2
+    movu    [r0 + r1 + 48],    m3
+
+    dec     r4d
+    lea     r0,    [r0 + 2 * r1]
+    lea     r2,    [r2 + 2 * r3]
+    jnz     .loop
+%endif
+    RET
 %endmacro
 
-BLOCKCOPY_PP_W64_H2 64, 16
-BLOCKCOPY_PP_W64_H2 64, 32
-BLOCKCOPY_PP_W64_H2 64, 48
-BLOCKCOPY_PP_W64_H2 64, 64
+BLOCKCOPY_PP_W64_H4 64, 16
+BLOCKCOPY_PP_W64_H4 64, 32
+BLOCKCOPY_PP_W64_H4 64, 48
+BLOCKCOPY_PP_W64_H4 64, 64
 
 ;-----------------------------------------------------------------------------
 ; void blockcopy_sp_2x4(pixel *dest, intptr_t destStride, int16_t *src, intptr_t srcStride)

