[x265] [PATCH 1 of 2] asm:intra pred planar32 sse2

dave dtyx265 at gmail.com
Tue Mar 10 00:00:08 CET 2015


On 03/09/2015 03:20 PM, chen wrote:
> A small improvement should be integrated:
> lea             r0, [r0 + r1]
> When I replace it with "add r0, r1", I get a ~3% improvement on my Haswell PC.
This makes no difference on my old machine.
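
For reference, a minimal sketch of the suggested change, applied to the
x86_64 INCREMENT macro from the patch below (the x86_32 macro would change
the same way); add computes the same address as lea here, it just also
sets flags, which nothing in the macro relies on:

%macro INCREMENT 0
    paddw           m2, m10
    paddw           m3, m11
    paddw           m0, m8
    paddw           m1, m9
    add             r0, r1          ; was: lea r0, [r0 + r1]
%endmacro

(On Haswell a plain add can issue on more execution ports than a lea, which
could explain a small gain there and none on older hardware.)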
> At 2015-03-10 05:24:08,dtyx265 at gmail.com wrote:
> ># HG changeset patch
> ># User David T Yuen <dtyx265 at gmail.com>
> ># Date 1425934257 25200
> ># Node ID ef383507c21ca704b58e759e440f4ae2e177c499
> ># Parent  c16a875d913fffbad405f7d5af51700b1a8990bb
> >asm:intra pred planar32 sse2
> >
> >This replaces the C code on processors that support SSE2 through SSSE3
> >(i.e., anything below SSE4).
> >The code is backported from the intra_pred_planar32 SSE4 version.
> >
> >There are essentially two versions here:
> >one for x86_64 and one for x86_32.  It would have been too ugly
> >to code the differences conditionally in a single primitive.
> >
> >64-bit
> >
> >./test/TestBench --testbench intrapred | grep intra_planar_32x32
> >intra_planar_32x32	13.12x 	 9254.99  	 121439.99
> >
> >32-bit
> >
> >./test/TestBench --testbench intrapred | grep intra_planar_32x32
> >intra_planar_32x32	10.20x 	 9720.04  	 99170.14
> >
> >diff -r c16a875d913f -r ef383507c21c source/common/x86/asm-primitives.cpp
> >--- a/source/common/x86/asm-primitives.cpp	Thu Mar 05 16:01:49 2015 -0800
> >+++ b/source/common/x86/asm-primitives.cpp	Mon Mar 09 13:50:57 2015 -0700
> >@@ -1221,6 +1221,7 @@
> >         p.cu[BLOCK_4x4].intra_pred[PLANAR_IDX] = x265_intra_pred_planar4_sse2;
> >         p.cu[BLOCK_8x8].intra_pred[PLANAR_IDX] = x265_intra_pred_planar8_sse2;
> >         p.cu[BLOCK_16x16].intra_pred[PLANAR_IDX] = x265_intra_pred_planar16_sse2;
> >+        p.cu[BLOCK_32x32].intra_pred[PLANAR_IDX] = x265_intra_pred_planar32_sse2;
> >
> >         p.cu[BLOCK_4x4].calcresidual = x265_getResidual4_sse2;
> >         p.cu[BLOCK_8x8].calcresidual = x265_getResidual8_sse2;
> >diff -r c16a875d913f -r ef383507c21c source/common/x86/intrapred.h
> >--- a/source/common/x86/intrapred.h	Thu Mar 05 16:01:49 2015 -0800
> >+++ b/source/common/x86/intrapred.h	Mon Mar 09 13:50:57 2015 -0700
> >@@ -38,6 +38,7 @@
> > void x265_intra_pred_planar4_sse2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int, int);
> > void x265_intra_pred_planar8_sse2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int, int);
> > void x265_intra_pred_planar16_sse2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int, int);
> >+void x265_intra_pred_planar32_sse2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int, int);
> > void x265_intra_pred_planar4_sse4(pixel* dst, intptr_t dstStride, const pixel* srcPix, int, int);
> > void x265_intra_pred_planar8_sse4(pixel* dst, intptr_t dstStride, const pixel* srcPix, int, int);
> > void x265_intra_pred_planar16_sse4(pixel* dst, intptr_t dstStride, const pixel* srcPix, int, int);
> >diff -r c16a875d913f -r ef383507c21c source/common/x86/intrapred8.asm
> >--- a/source/common/x86/intrapred8.asm	Thu Mar 05 16:01:49 2015 -0800
> >+++ b/source/common/x86/intrapred8.asm	Mon Mar 09 13:50:57 2015 -0700
> >@@ -734,6 +734,270 @@
> >     INTRA_PRED_PLANAR_16 15
> >     RET
> >
> >+;---------------------------------------------------------------------------------------
> >+; void intra_pred_planar(pixel* dst, intptr_t dstStride, pixel* srcPix, int, int filter)
> >+;---------------------------------------------------------------------------------------
> >+INIT_XMM sse2
> >+%macro PROCESS 1
> >+    pmullw          m5, %1, [pw_planar32_L]
> >+    pmullw          m6, %1, [pw_planar32_H]
> >+    paddw           m5, m0
> >+    paddw           m6, m1
> >+    psraw           m5, 6
> >+    psraw           m6, 6
> >+    packuswb        m5, m6
> >+    movu            [r0], m5
> >+
> >+    pmullw          m5, %1, [pw_planar16_0]
> >+    pmullw          %1, [pw_planar8_0]
> >+    paddw           m5, m2
> >+    paddw           %1, m3
> >+    psraw           m5, 6
> >+    psraw           %1, 6
> >+    packuswb        m5, %1
> >+    movu            [r0 + 16], m5
> >+%endmacro
> >+
> >+%if ARCH_X86_64 == 1
> >+cglobal intra_pred_planar32, 3,3,14
> >+    movd            m3, [r2 + 33]               ; topRight   = above[32]
> >+
> >+    pxor            m7, m7
> >+    pand            m3, [pw_00ff]
> >+    pshuflw         m3, m3, 0x00
> >+    pshufd          m3, m3, 0x44
> >+
> >+    pmullw          m0, m3, [multiL]            ; (x + 1) * topRight
> >+    pmullw          m1, m3, [multiH]            ; (x + 1) * topRight
> >+    pmullw          m2, m3, [multiH2]           ; (x + 1) * topRight
> >+    pmullw          m3, [multiH3]               ; (x + 1) * topRight
> >+
> >+    movd            m11, [r2 + 97]               ; bottomLeft = left[32]
> >+    pand            m11, [pw_00ff]
> >+    pshuflw         m11, m11, 0x00
> >+    pshufd          m11, m11, 0x44
> >+    mova            m5,  m11
> >+    paddw           m5,  [pw_32]
> >+
> >+    paddw           m0, m5
> >+    paddw           m1, m5
> >+    paddw           m2, m5
> >+    paddw           m3, m5
> >+    mova            m8, m11
> >+    mova            m9, m11
> >+    mova            m10, m11
> >+
> >+    movh            m4, [r2 + 1]
> >+    punpcklbw       m4, m7
> >+    psubw           m8, m4
> >+    pmullw          m4, [pw_planar32_1]
> >+    paddw           m0, m4
> >+
> >+    movh            m4, [r2 + 9]
> >+    punpcklbw       m4, m7
> >+    psubw           m9, m4
> >+    pmullw          m4, [pw_planar32_1]
> >+    paddw           m1, m4
> >+
> >+    movh            m4, [r2 + 17]
> >+    punpcklbw       m4, m7
> >+    psubw           m10, m4
> >+    pmullw          m4, [pw_planar32_1]
> >+    paddw           m2, m4
> >+
> >+    movh            m4, [r2 + 25]
> >+    punpcklbw       m4, m7
> >+    psubw           m11, m4
> >+    pmullw          m4, [pw_planar32_1]
> >+    paddw           m3, m4
> >+
> >+%macro INCREMENT 0
> >+    paddw           m2, m10
> >+    paddw           m3, m11
> >+    paddw           m0, m8
> >+    paddw           m1, m9
> >+    lea             r0, [r0 + r1]
> >+%endmacro
> >+
> >+%assign x 0
> >+%rep 2
> >+    movu            m4, [r2 + 65 + x * 16]
> >+    movu            m7, m4
> >+    pand            m4, [pw_00ff]
> >+    psrlw           m7, 8
> >+    pshuflw         m12, m4, 0x00
> >+    pshufd          m12, m12, 0x44
> >+    pshuflw         m13, m7, 0x00
> >+    pshufd          m13, m13, 0x44
> >+
> >+    PROCESS m12
> >+    INCREMENT
> >+    PROCESS m13
> >+    INCREMENT
> >+
> >+    pshuflw         m12, m4, 0x55
> >+    pshufd          m12, m12, 0x44
> >+    pshuflw         m13, m7, 0x55
> >+    pshufd          m13, m13, 0x44
> >+
> >+    PROCESS m12
> >+    INCREMENT
> >+    PROCESS m13
> >+    INCREMENT
> >+
> >+    pshuflw         m12, m4, 0xAA
> >+    pshufd          m12, m12, 0x44
> >+    pshuflw         m13, m7, 0xAA
> >+    pshufd          m13, m13, 0x44
> >+
> >+    PROCESS m12
> >+    INCREMENT
> >+    PROCESS m13
> >+    INCREMENT
> >+
> >+    pshuflw         m12, m4, 0xFF
> >+    pshufd          m12, m12, 0x44
> >+    pshuflw         m13, m7, 0xFF
> >+    pshufd          m13, m13, 0x44
> >+
> >+    PROCESS m12
> >+    INCREMENT
> >+    PROCESS m13
> >+    INCREMENT
> >+
> >+    pshufhw         m12, m4, 0x00
> >+    pshufd          m12, m12, 0xEE
> >+    pshufhw         m13, m7, 0x00
> >+    pshufd          m13, m13, 0xEE
> >+
> >+    PROCESS m12
> >+    INCREMENT
> >+    PROCESS m13
> >+    INCREMENT
> >+
> >+    pshufhw         m12, m4, 0x55
> >+    pshufd          m12, m12, 0xEE
> >+    pshufhw         m13, m7, 0x55
> >+    pshufd          m13, m13, 0xEE
> >+
> >+    PROCESS m12
> >+    INCREMENT
> >+    PROCESS m13
> >+    INCREMENT
> >+
> >+    pshufhw         m12, m4, 0xAA
> >+    pshufd          m12, m12, 0xEE
> >+    pshufhw         m13, m7, 0xAA
> >+    pshufd          m13, m13, 0xEE
> >+
> >+    PROCESS m12
> >+    INCREMENT
> >+    PROCESS m13
> >+    INCREMENT
> >+
> >+    pshufhw         m12, m4, 0xFF
> >+    pshufd          m12, m12, 0xEE
> >+    pshufhw         m13, m7, 0xFF
> >+    pshufd          m13, m13, 0xEE
> >+
> >+    PROCESS m12
> >+    INCREMENT
> >+    PROCESS m13
> >+
> >+    %if x < 1
> >+    INCREMENT
> >+    %endif
> >+%assign x x+1
> >+%endrep
> >+    RET
> >+
> >+%else ;end ARCH_X86_64, start ARCH_X86_32
> >+cglobal intra_pred_planar32, 3,3,8,0-(4*mmsize)
> >+    movd            m3, [r2 + 33]               ; topRight   = above[32]
> >+
> >+    pxor            m7, m7
> >+    pand            m3, [pw_00ff]
> >+    pshuflw         m3, m3, 0x00
> >+    pshufd          m3, m3, 0x44
> >+
> >+    pmullw          m0, m3, [multiL]            ; (x + 1) * topRight
> >+    pmullw          m1, m3, [multiH]            ; (x + 1) * topRight
> >+    pmullw          m2, m3, [multiH2]           ; (x + 1) * topRight
> >+    pmullw          m3, [multiH3]               ; (x + 1) * topRight
> >+
> >+    movd            m6, [r2 + 97]               ; bottomLeft = left[32]
> >+    pand            m6, [pw_00ff]
> >+    pshuflw         m6, m6, 0x00
> >+    pshufd          m6, m6, 0x44
> >+    mova            m5, m6
> >+    paddw           m5, [pw_32]
> >+
> >+    paddw           m0, m5
> >+    paddw           m1, m5
> >+    paddw           m2, m5
> >+    paddw           m3, m5
> >+
> >+    movh            m4, [r2 + 1]
> >+    punpcklbw       m4, m7
> >+    psubw           m5, m6, m4
> >+    mova            [rsp + 0 * mmsize], m5
> >+    pmullw          m4, [pw_planar32_1]
> >+    paddw           m0, m4
> >+
> >+    movh            m4, [r2 + 9]
> >+    punpcklbw       m4, m7
> >+    psubw           m5, m6, m4
> >+    mova            [rsp + 1 * mmsize], m5
> >+    pmullw          m4, [pw_planar32_1]
> >+    paddw           m1, m4
> >+
> >+    movh            m4, [r2 + 17]
> >+    punpcklbw       m4, m7
> >+    psubw           m5, m6, m4
> >+    mova            [rsp + 2 * mmsize], m5
> >+    pmullw          m4, [pw_planar32_1]
> >+    paddw           m2, m4
> >+
> >+    movh            m4, [r2 + 25]
> >+    punpcklbw       m4, m7
> >+    psubw           m5, m6, m4
> >+    mova            [rsp + 3 * mmsize], m5
> >+    pmullw          m4, [pw_planar32_1]
> >+    paddw           m3, m4
> >+
> >+%macro INCREMENT 0
> >+    paddw           m0, [rsp + 0 * mmsize]
> >+    paddw           m1, [rsp + 1 * mmsize]
> >+    paddw           m2, [rsp + 2 * mmsize]
> >+    paddw           m3, [rsp + 3 * mmsize]
> >+    lea             r0, [r0 + r1]
> >+%endmacro
> >+
> >+%assign x 0
> >+%rep 16
> >+    movd            m4, [r2 + 65 + x * 2]
> >+    movq            m7, m4
> >+    pand            m4, [pw_00ff]
> >+    psrlw           m7, 8
> >+
> >+    pshuflw         m4, m4, 0x00
> >+    pshufd          m4, m4, 0x44
> >+
> >+    pshuflw         m7, m7, 0x00
> >+    pshufd          m7, m7, 0x44
> >+
> >+    PROCESS m4
> >+    INCREMENT
> >+    PROCESS m7
> >+    %if x < 15
> >+    INCREMENT
> >+    %endif
> >+%assign x x+1
> >+%endrep
> >+    RET
> >+
> >+%endif ; end ARCH_X86_32
> >+
> > ;---------------------------------------------------------------------------------------------
> > ; void intra_pred_dc(pixel* dst, intptr_t dstStride, pixel *srcPix, int dirMode, int bFilter)
> > ;---------------------------------------------------------------------------------------------
