[x265] [PATCH] pixel_add_ps_64xN, asm code

praveen at multicorewareinc.com
Fri Nov 22 11:17:22 CET 2013


# HG changeset patch
# User Praveen Tiwari
# Date 1385115422 -19800
# Node ID 87a797d1c03afaea0b3cf9a2dfcac2c7e2950efc
# Parent  dfc9c2755c5a6c569d2a637d931a34a3a7967b92
pixel_add_ps_64xN, asm code

diff -r dfc9c2755c5a -r 87a797d1c03a source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Fri Nov 22 15:34:59 2013 +0530
+++ b/source/common/x86/asm-primitives.cpp	Fri Nov 22 15:47:02 2013 +0530
@@ -649,6 +649,10 @@
         p.chroma[X265_CSP_I420].filter_vsp[CHROMA_6x8] = x265_interp_4tap_vert_sp_6x8_sse4;
 
         p.luma_add_ps[LUMA_48x64] = x265_pixel_add_ps_48x64_sse4;
+        p.luma_add_ps[LUMA_64x16] = x265_pixel_add_ps_64x16_sse4;
+        p.luma_add_ps[LUMA_64x32] = x265_pixel_add_ps_64x32_sse4;
+        p.luma_add_ps[LUMA_64x48] = x265_pixel_add_ps_64x48_sse4;
+        p.luma_add_ps[LUMA_64x64] = x265_pixel_add_ps_64x64_sse4;
 
         p.calcrecon[BLOCK_16x16] = x265_calcRecons16_sse4;
         p.calcrecon[BLOCK_32x32] = x265_calcRecons32_sse4;
diff -r dfc9c2755c5a -r 87a797d1c03a source/common/x86/pixeladd8.asm
--- a/source/common/x86/pixeladd8.asm	Fri Nov 22 15:34:59 2013 +0530
+++ b/source/common/x86/pixeladd8.asm	Fri Nov 22 15:47:02 2013 +0530
@@ -987,3 +987,72 @@
 %endmacro
 
 PIXEL_ADD_PS_W48_H2 48, 64
+
+;-----------------------------------------------------------------------------
+; void pixel_add_ps_%1x%2(pixel *dest, intptr_t destStride, pixel *src0, int16_t *src1, intptr_t srcStride0, intptr_t srcStride1)
+;-----------------------------------------------------------------------------
+%macro PIXEL_ADD_PS_W64_H1 2
+INIT_XMM sse4
+cglobal pixel_add_ps_%1x%2, 6, 7, 8, dest, destStride, src0, src1, srcStride0, srcStride1
+
+      add         r5,            r5               ; srcStride1 is in int16_t elements, convert to bytes
+
+      mov         r6d,           %2               ; r6d = row count (%2)
+
+.loop:
+      pmovzxbw    m0,             [r2]            ; first 32 prediction pixels, zero-extended to 16 bits
+      pmovzxbw    m1,             [r2 + 8]
+      pmovzxbw    m2,             [r2 + 16]
+      pmovzxbw    m3,             [r2 + 24]
+
+      movu        m4,             [r3]            ; matching 32 int16_t residuals
+      movu        m5,             [r3 + 16]
+      movu        m6,             [r3 + 32]
+      movu        m7,             [r3 + 48]
+
+      paddw       m0,             m4
+      paddw       m1,             m5
+      paddw       m2,             m6
+      paddw       m3,             m7
+
+      packuswb    m0,             m1              ; saturate to [0, 255] and repack to bytes
+      packuswb    m2,             m3
+
+      movu        [r0],           m0
+      movu        [r0 + 16],      m2
+
+      pmovzxbw    m0,             [r2 + 32]
+      pmovzxbw    m1,             [r2 + 40]
+      pmovzxbw    m2,             [r2 + 48]
+      pmovzxbw    m3,             [r2 + 56]
+
+      movu        m4,             [r3 + 64]
+      movu        m5,             [r3 + 80]
+      movu        m6,             [r3 + 96]
+      movu        m7,             [r3 + 112]
+
+      paddw       m0,             m4
+      paddw       m1,             m5
+      paddw       m2,             m6
+      paddw       m3,             m7
+
+      packuswb    m0,             m1
+      packuswb    m2,             m3
+
+      movu        [r0 + 32],      m0
+      movu        [r0 + 48],      m2
+
+      lea         r0,             [r0 + r1]       ; dest += destStride
+      lea         r2,             [r2 + r4]       ; src0 += srcStride0
+      lea         r3,             [r3 + r5]       ; src1 += srcStride1 (already in bytes)
+
+      dec         r6d
+      jnz         .loop
+
+RET
+%endmacro
+
+PIXEL_ADD_PS_W64_H1 64, 16
+PIXEL_ADD_PS_W64_H1 64, 32
+PIXEL_ADD_PS_W64_H1 64, 48
+PIXEL_ADD_PS_W64_H1 64, 64
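
For readers skimming the patch: the macro reconstructs one 64-pixel row per iteration. pmovzxbw zero-extends 32 prediction bytes from src0 to 16 bits, paddw adds the corresponding int16_t residuals from src1, and packuswb clamps the sums to [0, 255] while packing them back to bytes; two such passes cover the full 64-pixel width. A minimal scalar C sketch of the same operation is below, purely for illustration: the function and parameter names mirror the comment in the asm, an 8-bit pixel build is assumed, and this is not the actual C fallback primitive in x265.

    #include <stdint.h>

    typedef uint8_t pixel;   /* 8-bit pixel build assumed */

    /* Illustrative reference for pixel_add_ps_WxH: dest = clip(src0 + src1).
     * bx/by are the block width/height (64 x N for these macros).
     * All strides are in elements of their respective planes. */
    static void pixel_add_ps_ref(pixel *dest, intptr_t destStride,
                                 const pixel *src0, const int16_t *src1,
                                 intptr_t srcStride0, intptr_t srcStride1,
                                 int bx, int by)
    {
        for (int y = 0; y < by; y++)
        {
            for (int x = 0; x < bx; x++)
            {
                int v = src0[x] + src1[x];                           /* widen and add residual (paddw)  */
                dest[x] = (pixel)(v < 0 ? 0 : (v > 255 ? 255 : v));  /* unsigned saturation (packuswb)  */
            }
            dest += destStride;
            src0 += srcStride0;
            src1 += srcStride1;
        }
    }

Note that the asm doubles srcStride1 up front (add r5, r5) because the residual stride is passed in int16_t elements while the address arithmetic needs bytes.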

