[x265] [PATCH 2 of 2] asm: interp_4tap_vert_pp sse2

dtyx265 at gmail.com
Wed May 13 01:03:57 CEST 2015


# HG changeset patch
# User David T Yuen <dtyx265 at gmail.com>
# Date 1431471728 25200
# Node ID 7f01a6dd81d9ec93b858e8b9328b1ee4e3b19c81
# Parent  53edc0b1c6b0c91c114f50164b68273a3b720e78
asm: interp_4tap_vert_pp sse2

This replaces the C code for the 48x64, 64x16, 64x32, 64x48 and 64x64 block sizes; a rough sketch of the filter being replaced follows the benchmark numbers below.

64-bit build, TestBench results (speedup, optimized, C reference):

./test/TestBench --testbench interp | grep vpp | grep "\[48x"
chroma_vpp[48x64]	8.18x 	 100417.78 	 821052.12

./test/TestBench --testbench interp | grep vpp | grep "\[64x"
chroma_vpp[64x64]	8.23x 	 135533.12 	 1115817.25
chroma_vpp[64x32]	8.17x 	 67360.61 	 550516.25
chroma_vpp[64x48]	8.29x 	 100938.14 	 837010.75
chroma_vpp[64x16]	8.25x 	 33651.41 	 277675.75

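For context, here is a minimal C++ sketch of what the 4-tap vertical chroma interpolation computes. This is illustrative only, not the exact x265 reference primitive: each output pixel is a weighted sum of four vertically adjacent source pixels, rounded by 32 and shifted right by 6 (matching the pw_32 / psraw 6 steps in the assembly below), then clipped to the 8-bit pixel range. The function and parameter names here are hypothetical.

    #include <cstdint>
    #include <algorithm>

    typedef uint8_t pixel;

    // Illustrative reference behaviour for interp_4tap_vert_pp on a width x height
    // block, with the four vertical filter taps already selected from coeffIdx.
    static void interp_4tap_vert_pp_ref(const pixel* src, intptr_t srcStride,
                                        pixel* dst, intptr_t dstStride,
                                        int width, int height, const int16_t coeff[4])
    {
        src -= srcStride;                       // filter window starts one row above
        for (int y = 0; y < height; y++)
        {
            for (int x = 0; x < width; x++)
            {
                int sum = coeff[0] * src[x]
                        + coeff[1] * src[x + srcStride]
                        + coeff[2] * src[x + 2 * srcStride]
                        + coeff[3] * src[x + 3 * srcStride];
                // round (+32), shift (>>6) and clip to [0, 255]
                dst[x] = (pixel)std::min(std::max((sum + 32) >> 6, 0), 255);
            }
            src += srcStride;
            dst += dstStride;
        }
    }

The assembly macro below vectorizes this per 16-pixel column strip, two output rows per loop iteration.
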
diff -r 53edc0b1c6b0 -r 7f01a6dd81d9 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Tue May 12 15:46:03 2015 -0700
+++ b/source/common/x86/asm-primitives.cpp	Tue May 12 16:02:08 2015 -0700
@@ -1459,6 +1459,11 @@
         p.chroma[X265_CSP_I444].pu[LUMA_32x24].filter_vpp = x265_interp_4tap_vert_pp_32x24_sse2;
         p.chroma[X265_CSP_I444].pu[LUMA_32x32].filter_vpp = x265_interp_4tap_vert_pp_32x32_sse2;
         p.chroma[X265_CSP_I444].pu[LUMA_32x64].filter_vpp = x265_interp_4tap_vert_pp_32x64_sse2;
+        p.chroma[X265_CSP_I444].pu[LUMA_48x64].filter_vpp = x265_interp_4tap_vert_pp_48x64_sse2;
+        p.chroma[X265_CSP_I444].pu[LUMA_64x16].filter_vpp = x265_interp_4tap_vert_pp_64x16_sse2;
+        p.chroma[X265_CSP_I444].pu[LUMA_64x32].filter_vpp = x265_interp_4tap_vert_pp_64x32_sse2;
+        p.chroma[X265_CSP_I444].pu[LUMA_64x48].filter_vpp = x265_interp_4tap_vert_pp_64x48_sse2;
+        p.chroma[X265_CSP_I444].pu[LUMA_64x64].filter_vpp = x265_interp_4tap_vert_pp_64x64_sse2;
 #endif
 
         ALL_LUMA_PU(luma_hpp, interp_8tap_horiz_pp, sse2);
diff -r 53edc0b1c6b0 -r 7f01a6dd81d9 source/common/x86/ipfilter8.asm
--- a/source/common/x86/ipfilter8.asm	Tue May 12 15:46:03 2015 -0700
+++ b/source/common/x86/ipfilter8.asm	Tue May 12 16:02:08 2015 -0700
@@ -2322,6 +2322,154 @@
     FILTER_V4_W32_sse2 64
 %endif
 
+;-----------------------------------------------------------------------------
+; void interp_4tap_vert_pp_%1x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------
+%macro FILTER_V4_W16n_H2_sse2 2
+INIT_XMM sse2
+cglobal interp_4tap_vert_pp_%1x%2, 4, 7, 11
+
+    mov         r4d,       r4m
+    sub         r0,        r1
+    shl         r4d,       5
+    pxor        m9,        m9
+    mova        m7,        [pw_32]
+
+%ifdef PIC
+    lea         r5,        [tab_ChromaCoeffV]
+    mova        m1,        [r5 + r4]
+    mova        m0,        [r5 + r4 + 16]
+%else
+    mova        m1,        [tab_ChromaCoeffV + r4]
+    mova        m0,        [tab_ChromaCoeffV + r4 + 16]
+%endif
+
+    mov         r4d,       %2/2
+
+.loop:
+
+    mov         r6d,       %1/16
+
+.loopW:
+
+    movu        m2,        [r0]
+    movu        m3,        [r0 + r1]
+
+    punpcklbw   m4,        m2,        m3
+    punpckhbw   m2,        m3
+
+    movhlps     m8,        m4
+    punpcklbw   m4,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m4,        m1
+    pmaddwd     m8,        m1
+    packssdw    m4,        m8
+
+    movhlps     m8,        m2
+    punpcklbw   m2,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m2,        m1
+    pmaddwd     m8,        m1
+    packssdw    m2,        m8
+
+    lea         r5,        [r0 + 2 * r1]
+    movu        m5,        [r5]
+    movu        m6,        [r5 + r1]
+
+    punpckhbw   m10,        m5,        m6
+    movhlps     m8,        m10
+    punpcklbw   m10,       m9
+    punpcklbw   m8,        m9
+    pmaddwd     m10,       m0
+    pmaddwd     m8,        m0
+    packssdw    m10,       m8
+    paddw       m2,        m10
+
+    punpcklbw   m10,        m5,        m6
+    movhlps     m8,        m10
+    punpcklbw   m10,       m9
+    punpcklbw   m8,        m9
+    pmaddwd     m10,       m0
+    pmaddwd     m8,        m0
+    packssdw    m10,       m8
+    paddw       m4,        m10
+
+    paddw       m4,        m7
+    psraw       m4,        6
+    paddw       m2,        m7
+    psraw       m2,        6
+
+    packuswb    m4,        m2
+    movu        [r2],      m4
+
+    punpcklbw   m4,        m3,        m5
+    punpckhbw   m3,        m5
+
+    movhlps     m8,        m4
+    punpcklbw   m4,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m4,        m1
+    pmaddwd     m8,        m1
+    packssdw    m4,        m8
+
+    movhlps     m8,        m3
+    punpcklbw   m3,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m3,        m1
+    pmaddwd     m8,        m1
+    packssdw    m3,        m8
+
+    movu        m5,        [r5 + 2 * r1]
+
+    punpcklbw   m2,        m6,        m5
+    punpckhbw   m6,        m5
+
+    movhlps     m8,        m2
+    punpcklbw   m2,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m2,        m0
+    pmaddwd     m8,        m0
+    packssdw    m2,        m8
+
+    movhlps     m8,        m6
+    punpcklbw   m6,        m9
+    punpcklbw   m8,        m9
+    pmaddwd     m6,        m0
+    pmaddwd     m8,        m0
+    packssdw    m6,        m8
+
+    paddw       m4,        m2
+    paddw       m3,        m6
+
+    paddw       m4,        m7
+    psraw       m4,        6
+    paddw       m3,        m7
+    psraw       m3,        6
+
+    packuswb    m4,        m3
+    movu        [r2 + r3],      m4
+
+    add         r0,        16
+    add         r2,        16
+    dec         r6d
+    jnz         .loopW
+
+    lea         r0,        [r0 + r1 * 2 - %1]
+    lea         r2,        [r2 + r3 * 2 - %1]
+
+    dec         r4d
+    jnz        .loop
+    RET
+%endmacro
+
+%if ARCH_X86_64
+    FILTER_V4_W16n_H2_sse2 64, 64
+    FILTER_V4_W16n_H2_sse2 64, 32
+    FILTER_V4_W16n_H2_sse2 64, 48
+    FILTER_V4_W16n_H2_sse2 48, 64
+    FILTER_V4_W16n_H2_sse2 64, 16
+%endif
+
 %macro FILTER_H4_w2_2 3
     movh        %2, [srcq - 1]
     pshufb      %2, %2, Tm0
diff -r 53edc0b1c6b0 -r 7f01a6dd81d9 source/common/x86/ipfilter8.h
--- a/source/common/x86/ipfilter8.h	Tue May 12 15:46:03 2015 -0700
+++ b/source/common/x86/ipfilter8.h	Tue May 12 16:02:08 2015 -0700
@@ -941,6 +941,11 @@
 void x265_interp_4tap_vert_pp_32x32_sse2(const pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx);
 void x265_interp_4tap_vert_pp_32x48_sse2(const pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx);
 void x265_interp_4tap_vert_pp_32x64_sse2(const pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx);
+void x265_interp_4tap_vert_pp_48x64_sse2(const pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx);
+void x265_interp_4tap_vert_pp_64x16_sse2(const pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx);
+void x265_interp_4tap_vert_pp_64x32_sse2(const pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx);
+void x265_interp_4tap_vert_pp_64x48_sse2(const pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx);
+void x265_interp_4tap_vert_pp_64x64_sse2(const pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx);
 #endif
 #undef LUMA_FILTERS
 #undef LUMA_SP_FILTERS

