[x265] [PATCH] asm: 10bpp code for pixel_sub_64xN

murugan at multicorewareinc.com
Fri Dec 6 09:31:20 CET 2013


# HG changeset patch
# User Murugan Vairavel <murugan at multicorewareinc.com>
# Date 1386318564 -19800
#      Fri Dec 06 13:59:24 2013 +0530
# Node ID a87aa775087d5fdd3a75d5f3f599178034cf2db1
# Parent  b9fcce63ab5cc543b5bdaad68c1aff2e64f8f523
asm: 10bpp code for pixel_sub_64xN
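
For reference, these routines implement the same behaviour as the C primitive whose prototype appears in the patch below: for each row of the 64xN block, the 64 pixels of src0 and src1 are subtracted element-wise and the signed 16-bit differences are written to dest. A minimal C++ sketch of that semantics (names are illustrative, not the exact x265 reference code; in the 10bpp build, pixel is uint16_t and the strides are given in elements, which is why the assembly doubles r1, r4 and r5 into byte offsets):

    #include <cstdint>

    typedef uint16_t pixel;  // HIGH_BIT_DEPTH build: 10-bit samples held in 16 bits

    // Illustrative scalar reference for pixel_sub_ps_64xN
    template<int bx, int by>
    static void pixel_sub_ps_ref(int16_t* dest, intptr_t destStride,
                                 pixel* src0, pixel* src1,
                                 intptr_t srcStride0, intptr_t srcStride1)
    {
        for (int y = 0; y < by; y++)
        {
            // One row: bx element-wise differences, stored as int16_t
            for (int x = 0; x < bx; x++)
                dest[x] = (int16_t)(src0[x] - src1[x]);

            src0 += srcStride0;
            src1 += srcStride1;
            dest += destStride;
        }
    }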

diff -r b9fcce63ab5c -r a87aa775087d source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Fri Dec 06 13:45:37 2013 +0530
+++ b/source/common/x86/asm-primitives.cpp	Fri Dec 06 13:59:24 2013 +0530
@@ -659,6 +659,10 @@
         p.luma_sub_ps[LUMA_16x64] = x265_pixel_sub_ps_16x64_sse2;
         p.luma_sub_ps[LUMA_32x64] = x265_pixel_sub_ps_32x64_sse2;
         p.luma_sub_ps[LUMA_48x64] = x265_pixel_sub_ps_48x64_sse2;
+        p.luma_sub_ps[LUMA_64x16] = x265_pixel_sub_ps_64x16_sse2;
+        p.luma_sub_ps[LUMA_64x32] = x265_pixel_sub_ps_64x32_sse2;
+        p.luma_sub_ps[LUMA_64x48] = x265_pixel_sub_ps_64x48_sse2;
+        p.luma_sub_ps[LUMA_64x64] = x265_pixel_sub_ps_64x64_sse2;
     }
     if (cpuMask & X265_CPU_SSSE3)
     {
diff -r b9fcce63ab5c -r a87aa775087d source/common/x86/pixel-util8.asm
--- a/source/common/x86/pixel-util8.asm	Fri Dec 06 13:45:37 2013 +0530
+++ b/source/common/x86/pixel-util8.asm	Fri Dec 06 13:59:24 2013 +0530
@@ -3451,15 +3451,92 @@
 ; void pixel_sub_ps_c_%1x%2(int16_t *dest, intptr_t destride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
 ;-----------------------------------------------------------------------------
 %macro PIXELSUB_PS_W64_H2 2
-INIT_XMM sse4
+%if HIGH_BIT_DEPTH
+cglobal pixel_sub_ps_%1x%2, 6, 7, 6, dest, deststride, src0, src1, srcstride0, srcstride1
+    add     r1,     r1
+    mov     r6d,    %2/2
+    add     r4,     r4
+    add     r5,     r5
+.loop
+    movu     m0,    [r2]
+    movu     m1,    [r3]
+    movu     m2,    [r2 + 16]
+    movu     m3,    [r3 + 16]
+    movu     m4,    [r2 + 32]
+    movu     m5,    [r3 + 32]
+    psubw    m0,    m1
+    psubw    m2,    m3
+    psubw    m4,    m5
+    movu     m3,    [r2 + 48]
+    movu     m5,    [r3 + 48]
+    psubw    m3,    m5
+
+    movu    [r0],         m0
+    movu    [r0 + 16],    m2
+    movu    [r0 + 32],    m4
+    movu    [r0 + 48],    m3
+
+    movu     m0,    [r2 + 64]
+    movu     m1,    [r3 + 64]
+    movu     m2,    [r2 + 80]
+    movu     m3,    [r3 + 80]
+    movu     m4,    [r2 + 96]
+    movu     m5,    [r3 + 96]
+    psubw    m0,    m1
+    psubw    m2,    m3
+    psubw    m4,    m5
+    movu     m3,    [r2 + 112]
+    movu     m5,    [r3 + 112]
+    psubw    m3,    m5
+
+    movu    [r0 + 64],     m0
+    movu    [r0 + 80],     m2
+    movu    [r0 + 96],     m4
+    movu    [r0 + 112],    m3
+
+    movu     m0,    [r2 + r4]
+    movu     m1,    [r3 + r5]
+    movu     m2,    [r2 + r4 + 16]
+    movu     m3,    [r3 + r5 + 16]
+    movu     m4,    [r2 + r4 + 32]
+    movu     m5,    [r3 + r5 + 32]
+    psubw    m0,    m1
+    psubw    m2,    m3
+    psubw    m4,    m5
+    movu     m3,    [r2 + r4 + 48]
+    movu     m5,    [r3 + r5 + 48]
+    psubw    m3,    m5
+
+    movu    [r0 + r1],         m0
+    movu    [r0 + r1 + 16],    m2
+    movu    [r0 + r1 + 32],    m4
+    movu    [r0 + r1 + 48],    m3
+
+    movu     m0,    [r2 + r4 + 64]
+    movu     m1,    [r3 + r5 + 64]
+    movu     m2,    [r2 + r4 + 80]
+    movu     m3,    [r3 + r5 + 80]
+    movu     m4,    [r2 + r4 + 96]
+    movu     m5,    [r3 + r5 + 96]
+    psubw    m0,    m1
+    psubw    m2,    m3
+    psubw    m4,    m5
+    movu     m3,    [r2 + r4 + 112]
+    movu     m5,    [r3 + r5 + 112]
+    psubw    m3,    m5
+
+    movu    [r0 + r1 + 64],     m0
+    movu    [r0 + r1 + 80],     m2
+    movu    [r0 + r1 + 96],     m4
+    movu    [r0 + r1 + 112],    m3
+
+%else
+
 cglobal pixel_sub_ps_%1x%2, 6, 7, 7, dest, deststride, src0, src1, srcstride0, srcstride1
-
-add    r1,     r1
-mov    r6d,    %2/2
-pxor   m6,    m6
-
+    add     r1,     r1
+    mov     r6d,    %2/2
+    pxor    m6,     m6
 .loop
-
     movu         m1,    [r2]
     pmovzxbw     m0,    m1
     punpckhbw    m1,    m6
@@ -3469,7 +3546,6 @@
     movu         m5,    [r2 + 16]
     pmovzxbw     m4,    m5
     punpckhbw    m5,    m6
-
     psubw        m0,    m2
     psubw        m1,    m3
 
@@ -3482,7 +3558,6 @@
     movu         m3,    [r2 + 32]
     pmovzxbw     m2,    m3
     punpckhbw    m3,    m6
-
     psubw        m4,    m0
     psubw        m5,    m1
 
@@ -3495,7 +3570,6 @@
     movu         m1,    [r2 + 48]
     pmovzxbw     m0,    m1
     punpckhbw    m1,    m6
-
     psubw        m2,    m4
     psubw        m3,    m5
 
@@ -3508,7 +3582,6 @@
     movu         m5,    [r2 + r4]
     pmovzxbw     m4,    m5
     punpckhbw    m5,    m6
-
     psubw        m0,    m2
     psubw        m1,    m3
 
@@ -3521,7 +3594,6 @@
     movu         m3,    [r2 + r4 + 16]
     pmovzxbw     m2,    m3
     punpckhbw    m3,    m6
-
     psubw        m4,    m0
     psubw        m5,    m1
 
@@ -3534,7 +3606,6 @@
     movu         m1,    [r2 + r4 + 32]
     pmovzxbw     m0,    m1
     punpckhbw    m1,    m6
-
     psubw        m2,    m4
     psubw        m3,    m5
 
@@ -3547,7 +3618,6 @@
     movu         m5,    [r2 + r4 + 48]
     pmovzxbw     m4,    m5
     punpckhbw    m5,    m6
-
     psubw        m0,    m2
     psubw        m1,    m3
 
@@ -3557,28 +3627,33 @@
     movu         m1,    [r3 + r5 + 48]
     pmovzxbw     m0,    m1
     punpckhbw    m1,    m6
-
     psubw        m4,    m0
     psubw        m5,    m1
 
     movu    [r0 + r1 + 96],     m4
     movu    [r0 + r1 + 112],    m5
-
-    lea     r2,                 [r2 + 2 * r4]
-    lea     r3,                 [r3 + 2 * r5]
-    lea     r0,                 [r0 + 2 * r1]
-
+%endif
     dec    r6d
-
-jnz    .loop
-
-RET
+    lea    r2,    [r2 + 2 * r4]
+    lea    r3,    [r3 + 2 * r5]
+    lea    r0,    [r0 + 2 * r1]
+    jnz    .loop
+    RET
 %endmacro
 
+%if HIGH_BIT_DEPTH
+INIT_XMM sse2
 PIXELSUB_PS_W64_H2 64, 16
 PIXELSUB_PS_W64_H2 64, 32
 PIXELSUB_PS_W64_H2 64, 48
 PIXELSUB_PS_W64_H2 64, 64
+%else
+INIT_XMM sse4
+PIXELSUB_PS_W64_H2 64, 16
+PIXELSUB_PS_W64_H2 64, 32
+PIXELSUB_PS_W64_H2 64, 48
+PIXELSUB_PS_W64_H2 64, 64
+%endif
 
 ;=============================================================================
 ; variance


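For context, the asm-primitives.cpp hunk above only registers the new SSE2 functions in the encoder's function-pointer table; callers then reach them through p.luma_sub_ps indexed by partition size. A self-contained sketch of that dispatch pattern (apart from luma_sub_ps and the LUMA_64xN indices shown in the hunk, every name here is an illustrative assumption rather than x265's actual internals):

    #include <cstdint>

    typedef uint16_t pixel;  // 10bpp build

    // Function-pointer type matching the pixel_sub_ps prototype in the patch
    typedef void (*pixel_sub_ps_t)(int16_t* dest, intptr_t destStride,
                                   pixel* src0, pixel* src1,
                                   intptr_t srcStride0, intptr_t srcStride1);

    enum { LUMA_64x16, LUMA_64x32, LUMA_64x48, LUMA_64x64, NUM_LUMA_PARTITIONS };

    struct Primitives { pixel_sub_ps_t luma_sub_ps[NUM_LUMA_PARTITIONS]; };

    // At setup time the table is filled much like the hunk above, e.g.
    //     p.luma_sub_ps[LUMA_64x64] = x265_pixel_sub_ps_64x64_sse2;
    // after which a 64x64 residual is one indirect call:
    void computeResidual64x64(Primitives& p, int16_t* residual,
                              pixel* fenc, intptr_t fencStride,
                              pixel* pred, intptr_t predStride)
    {
        p.luma_sub_ps[LUMA_64x64](residual, 64, fenc, pred, fencStride, predStride);
    }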