[x265] [PATCH Review only] asm: pixelsub_ps routine for 16xN block

murugan at multicorewareinc.com murugan at multicorewareinc.com
Fri Nov 8 13:05:27 CET 2013


# HG changeset patch
# User Murugan Vairavel <murugan at multicorewareinc.com>
# Date 1383912304 -19800
#      Fri Nov 08 17:35:04 2013 +0530
# Node ID 2276504f3cf3d0ddad4273771a87dbcecc47b21f
# Parent  c91092ea787273e89dbd2475e7f52f8b35bc5467
asm: pixelsub_ps routine for 16xN block

diff -r c91092ea7872 -r 2276504f3cf3 source/common/x86/pixel-a.asm
--- a/source/common/x86/pixel-a.asm	Fri Nov 08 16:35:44 2013 +0530
+++ b/source/common/x86/pixel-a.asm	Fri Nov 08 17:35:04 2013 +0530
@@ -5710,3 +5710,97 @@
 %endmacro
 
 PIXELSUB_PS_W12_H4 12, 16
+
+;-----------------------------------------------------------------------------
+; void pixel_sub_ps_%1x%2(pixel *dest, intptr_t deststride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
+;-----------------------------------------------------------------------------
+%macro PIXELSUB_PS_W16_H4 2
+INIT_XMM sse4
+cglobal pixel_sub_ps_%1x%2, 6, 7, 8, dest, deststride, src0, src1, srcstride0, srcstride1
+
+add    r1,     r1                      ; dest elements are 16-bit words (stores below write words), so stride -> bytes
+mov    r6d,    %2/4                    ; r6d = iteration count; each pass handles 4 rows
+
+.loop:
+    ; rows 0 and 1: each 16-pixel row is loaded as two 8-byte halves
+    movh        m0,    [r2]
+    movh        m1,    [r2 + 8]
+    movh        m2,    [r3]
+    movh        m3,    [r3 + 8]
+
+    movh        m4,    [r2 + r4]
+    movh        m5,    [r2 + r4 + 8]
+    movh        m6,    [r3 + r5]
+    movh        m7,    [r3 + r5 + 8]
+
+    pmovzxbw    m0,    m0              ; zero-extend u8 pixels to 16-bit words
+    pmovzxbw    m1,    m1
+    pmovzxbw    m2,    m2
+    pmovzxbw    m3,    m3
+    pmovzxbw    m4,    m4
+    pmovzxbw    m5,    m5
+    pmovzxbw    m6,    m6
+    pmovzxbw    m7,    m7
+
+    psubw       m0,    m2              ; residual = src0 - src1 (word arithmetic, no saturation)
+    psubw       m1,    m3
+    psubw       m4,    m6
+    psubw       m5,    m7
+
+    movu    [r0],              m0      ; 16 words = 32 bytes per output row
+    movu    [r0 + 16],         m1
+    movu    [r0 + r1],         m4
+    movu    [r0 + r1 + 16],    m5
+
+    ; rows 2 and 3: row 2 is addressed as base + 2*stride, then bases advance
+    movh    m0,    [r2 + 2 * r4]
+    movh    m1,    [r2 + 2 * r4 + 8]
+    movh    m2,    [r3 + 2 * r5]
+    movh    m3,    [r3 + 2 * r5 + 8]
+
+    lea     r2,    [r2 + 2 * r4]       ; r2/r3 now point at row 2
+    lea     r3,    [r3 + 2 * r5]
+
+    movh    m4,    [r2 + r4]           ; row 3 = row 2 base + stride
+    movh    m5,    [r2 + r4 + 8]
+    movh    m6,    [r3 + r5]
+    movh    m7,    [r3 + r5 + 8]
+
+    pmovzxbw    m0,    m0              ; widen u8 -> s16 as above
+    pmovzxbw    m1,    m1
+    pmovzxbw    m2,    m2
+    pmovzxbw    m3,    m3
+    pmovzxbw    m4,    m4
+    pmovzxbw    m5,    m5
+    pmovzxbw    m6,    m6
+    pmovzxbw    m7,    m7
+
+    psubw       m0,    m2
+    psubw       m1,    m3
+    psubw       m4,    m6
+    psubw       m5,    m7
+
+    movu     [r0 + 2 * r1],         m0 ; store row 2
+    movu     [r0 + 2 * r1 + 16],    m1
+
+    lea      r0,                    [r0 + 2 * r1]
+
+    movu     [r0 + r1],             m4 ; store row 3
+    movu     [r0 + r1 + 16],        m5
+
+    lea    r2,    [r2 + 2 * r4]        ; advance all pointers past the 4 rows just processed
+    lea    r3,    [r3 + 2 * r5]
+    lea    r0,    [r0 + 2 * r1]
+
+    dec    r6d
+
+jnz    .loop
+
+RET
+%endmacro
+
+PIXELSUB_PS_W16_H4 16, 4
+PIXELSUB_PS_W16_H4 16, 8
+PIXELSUB_PS_W16_H4 16, 12
+PIXELSUB_PS_W16_H4 16, 16
+PIXELSUB_PS_W16_H4 16, 32
+PIXELSUB_PS_W16_H4 16, 64


More information about the x265-devel mailing list