[x265] [PATCH] asm: 10bpp code for pixel_add_ps_16xN

murugan at multicorewareinc.com
Mon Dec 9 08:42:41 CET 2013


# HG changeset patch
# User Murugan Vairavel <murugan at multicorewareinc.com>
# Date 1386574891 -19800
#      Mon Dec 09 13:11:31 2013 +0530
# Node ID a6ead95389e7e81f419b9f4bd2c67e90df0de198
# Parent  e1c09c89e4968d0c3e3dd5a60297a960b6df636e
asm: 10bpp code for pixel_add_ps_16xN
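
For context, the operation being vectorized is "add prediction and residual, then clamp back to the pixel range"; at 10bpp the pixels are 16-bit, so the new path clamps against pw_pixel_max instead of relying on the packuswb saturation used in the 8-bit path. A minimal scalar sketch is below; the explicit width/height parameters and the local pixel typedef are illustrative assumptions (in the tree the block size is fixed per entry point and pixel is a build-time typedef), so treat it as a reference for the semantics, not in-tree code.

    #include <algorithm>
    #include <cstdint>

    typedef uint16_t pixel;                   // HIGH_BIT_DEPTH builds use 16-bit pixels

    // Scalar sketch of pixel_add_ps for a 10bpp build: dest = clip(src0 + src1),
    // where src0 holds predicted pixels and src1 holds int16_t residuals.
    // Strides are in elements here; the asm instead doubles its byte strides.
    static void pixel_add_ps_c(pixel* dest, intptr_t destStride,
                               const pixel* src0, const int16_t* src1,
                               intptr_t srcStride0, intptr_t srcStride1,
                               int width, int height)
    {
        const int pixelMax = (1 << 10) - 1;   // 1023, the pw_pixel_max constant in the asm
        for (int y = 0; y < height; y++)
        {
            for (int x = 0; x < width; x++)
                dest[x] = (pixel)std::min(std::max(src0[x] + src1[x], 0), pixelMax);

            dest += destStride;
            src0 += srcStride0;
            src1 += srcStride1;
        }
    }
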

diff -r e1c09c89e496 -r a6ead95389e7 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Mon Dec 09 13:01:54 2013 +0530
+++ b/source/common/x86/asm-primitives.cpp	Mon Dec 09 13:11:31 2013 +0530
@@ -678,6 +678,12 @@
         p.chroma[X265_CSP_I420].add_ps[CHROMA_8x16] = x265_pixel_add_ps_8x16_sse2;
         p.chroma[X265_CSP_I420].add_ps[CHROMA_8x32] = x265_pixel_add_ps_8x32_sse2;
         p.chroma[X265_CSP_I420].add_ps[CHROMA_12x16] = x265_pixel_add_ps_12x16_sse2;
+        p.chroma[X265_CSP_I420].add_ps[CHROMA_16x4] = x265_pixel_add_ps_16x4_sse2;
+        p.chroma[X265_CSP_I420].add_ps[CHROMA_16x8] = x265_pixel_add_ps_16x8_sse2;
+        p.chroma[X265_CSP_I420].add_ps[CHROMA_16x12] = x265_pixel_add_ps_16x12_sse2;
+        p.chroma[X265_CSP_I420].add_ps[CHROMA_16x16] = x265_pixel_add_ps_16x16_sse2;
+        p.chroma[X265_CSP_I420].add_ps[CHROMA_16x32] = x265_pixel_add_ps_16x32_sse2;
+        p.luma_add_ps[LUMA_16x64] = x265_pixel_add_ps_16x64_sse2;
     }
     if (cpuMask & X265_CPU_SSSE3)
     {
diff -r e1c09c89e496 -r a6ead95389e7 source/common/x86/pixeladd8.asm
--- a/source/common/x86/pixeladd8.asm	Mon Dec 09 13:01:54 2013 +0530
+++ b/source/common/x86/pixeladd8.asm	Mon Dec 09 13:11:31 2013 +0530
@@ -911,74 +911,71 @@
 PIXEL_ADD_PS_W12_H4 12, 16
 
 ;-----------------------------------------------------------------------------
-; void pixel_add_ps_16x4(pixel *dest, intptr_t destride, pixel *src0, int16_t *scr1, intptr_t srcStride0, intptr_t srcStride1)
-;-----------------------------------------------------------------------------
-INIT_XMM sse4
-cglobal pixel_add_ps_16x4, 6, 6, 4, dest, destride, src0, scr1, srcStride0, srcStride1
-
-    add         r5,            r5
-
-    pmovzxbw    m0,            [r2]
-    pmovzxbw    m1,            [r2 + 8]
-
-    movu        m2,            [r3]
-    movu        m3,            [r3 + 16]
-
-    paddw       m0,            m2
-    paddw       m1,            m3
-
-    packuswb    m0,            m1
-
-    movu        [r0],          m0
-
-    pmovzxbw    m0,            [r2 + r4]
-    pmovzxbw    m1,            [r2 + r4 + 8]
-
-    movu        m2,            [r3 + r5]
-    movu        m3,            [r3 + r5 + 16]
-
-    paddw       m0,            m2
-    paddw       m1,            m3
-
-    packuswb    m0,            m1
-
-    movu        [r0 + r1],     m0
-
-    pmovzxbw    m0,            [r2 + 2 * r4]
-    pmovzxbw    m1,            [r2 + 2 * r4 + 8]
-
-    movu        m2,            [r3 + 2 * r5]
-    movu        m3,            [r3 + 2 * r5 + 16]
-
-    paddw       m0,            m2
-    paddw       m1,            m3
-
-    packuswb    m0,            m1
-
-    movu        [r0 + 2 * r1], m0
-
-    lea         r0,            [r0 + 2 * r1]
-    lea         r2,            [r2 + 2 * r4]
-    lea         r3,            [r3 + 2 * r5]
-
-    pmovzxbw    m0,            [r2 + r4]
-    pmovzxbw    m1,            [r2 + r4 + 8]
-
-    movu        m2,            [r3 + r5]
-    movu        m3,            [r3 + r5 + 16]
-
-    paddw       m0,            m2
-    paddw       m1,            m3
-
-    packuswb    m0,            m1
-
-    movu        [r0 + r1],     m0
-    RET
-
-;-----------------------------------------------------------------------------
 ; void pixel_add_ps_%1x%2(pixel *dest, intptr_t destride, pixel *src0, int16_t *scr1, intptr_t srcStride0, intptr_t srcStride1)
 ;-----------------------------------------------------------------------------
 %macro PIXEL_ADD_PS_W16_H4 2
+%if HIGH_BIT_DEPTH
+INIT_XMM sse2
+cglobal pixel_add_ps_%1x%2, 6, 7, 6, dest, destride, src0, scr1, srcStride0, srcStride1
+    mov     r6d,    %2/4
+    add      r1,    r1
+    add      r4,    r4
+    add      r5,    r5
+    pxor     m4,    m4
+    mova     m5,    [pw_pixel_max]
+.loop
+    movu     m0,    [r2]
+    movu     m1,    [r3]
+    movu     m2,    [r2 + 16]
+    movu     m3,    [r3 + 16]
+    paddw    m0,    m1
+    paddw    m2,    m3
+    CLIPW    m0,    m4,    m5
+    CLIPW    m2,    m4,    m5
+
+    movu     [r0],           m0
+    movu     [r0 + 16],      m2
+
+    movu     m0,    [r2 + r4]
+    movu     m1,    [r3 + r5]
+    movu     m2,    [r2 + r4 + 16]
+    movu     m3,    [r3 + r5 + 16]
+    paddw    m0,    m1
+    paddw    m2,    m3
+    CLIPW    m0,    m4,    m5
+    CLIPW    m2,    m4,    m5
+
+    movu     [r0 + r1],           m0
+    movu     [r0 + r1 + 16],      m2
+
+    lea      r2,    [r2 + 2 * r4]
+    lea      r3,    [r3 + 2 * r5]
+    lea      r0,    [r0 + 2 * r1]
+
+    movu     m0,    [r2]
+    movu     m1,    [r3]
+    movu     m2,    [r2 + 16]
+    movu     m3,    [r3 + 16]
+    paddw    m0,    m1
+    paddw    m2,    m3
+    CLIPW    m0,    m4,    m5
+    CLIPW    m2,    m4,    m5
+
+    movu     [r0],           m0
+    movu     [r0 + 16],      m2
+
+    movu     m0,    [r2 + r4]
+    movu     m1,    [r3 + r5]
+    movu     m2,    [r2 + r4 + 16]
+    movu     m3,    [r3 + r5 + 16]
+    paddw    m0,    m1
+    paddw    m2,    m3
+    CLIPW    m0,    m4,    m5
+    CLIPW    m2,    m4,    m5
+
+    movu     [r0 + r1],           m0
+    movu     [r0 + r1 + 16],      m2
+%else
 INIT_XMM sse4
 cglobal pixel_add_ps_%1x%2, 6, 7, 8, dest, destride, src0, scr1, srcStride0, srcStride1
 
@@ -1042,7 +1039,7 @@
       packuswb    m0,            m1
 
       movu        [r0 + r1],     m0
-
+%endif
       lea         r0,            [r0 + 2 * r1]
       lea         r2,            [r2 + 2 * r4]
       lea         r3,            [r3 + 2 * r5]
@@ -1053,6 +1050,7 @@
 RET
 %endmacro
 
+PIXEL_ADD_PS_W16_H4 16,  4
 PIXEL_ADD_PS_W16_H4 16,  8
 PIXEL_ADD_PS_W16_H4 16, 12
 PIXEL_ADD_PS_W16_H4 16, 16
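
The clamping in the new HIGH_BIT_DEPTH path goes through the shared CLIPW macro rather than packuswb, since 10-bit samples no longer fit a byte. For readers checking the logic, an intrinsics-level sketch of the per-lane behaviour of "CLIPW m, min, max" follows; it illustrates the intended semantics and is not the in-tree macro (which lives in the shared x86 utility asm):

    #include <emmintrin.h>   // SSE2

    // Clamp each signed 16-bit lane of v into [lo, hi], matching a
    // pmaxsw/pminsw pair.  With lo = 0 and hi = pw_pixel_max (1023 at 10bpp)
    // this keeps the paddw results within the valid pixel range.
    static inline __m128i clipw_sse2(__m128i v, __m128i lo, __m128i hi)
    {
        v = _mm_max_epi16(v, lo);    // clamp below
        v = _mm_min_epi16(v, hi);    // clamp above
        return v;
    }
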

