[x265-commits] [x265] asm: interp_4tap_vert_pp_32xN sse2
David T Yuen
dtyx265 at gmail.com
Wed May 13 07:07:37 CEST 2015
details: http://hg.videolan.org/x265/rev/53edc0b1c6b0
branches:
changeset: 10429:53edc0b1c6b0
user: David T Yuen <dtyx265 at gmail.com>
date: Tue May 12 15:46:03 2015 -0700
description:
asm: interp_4tap_vert_pp_32xN sse2
This code replaces the C code for 32x8, 32x16, 32x24, 32x32, 32x48 and 32x64.
64-bit build:
./test/TestBench --testbench interp | grep vpp | grep "\[32x"
chroma_vpp[32x32] 8.08x 33694.79 272221.94
chroma_vpp[32x16] 8.08x 16937.50 136857.25
chroma_vpp[32x24] 8.10x 25313.88 205103.12
chroma_vpp[32x64] 8.16x 67336.32 549483.44
chroma_vpp[32x32] 8.10x 33641.55 272353.22
chroma_vpp[32x48] 8.11x 50615.54 410569.16
chroma_vpp[32x16] 8.13x 16937.77 137776.73
chroma_vpp[32x32] 8.04x 33824.15 271907.91
chroma_vpp[32x16] 8.17x 16937.72 138375.62
chroma_vpp[32x64] 8.14x 66996.15 545284.50
chroma_vpp[32x24] 8.11x 25328.33 205363.39
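For anyone reading the numbers: the TestBench columns appear to be the measured speedup, the cycle count of the SSE2 primitive, and the cycle count of the C reference it replaces (for example, 272221.94 / 33694.79 ≈ 8.08, matching the reported 8.08x for chroma_vpp[32x32]). The repeated sizes presumably come from the same partition being benchmarked for more than one chroma format.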
details: http://hg.videolan.org/x265/rev/7f01a6dd81d9
branches:
changeset: 10430:7f01a6dd81d9
user: David T Yuen <dtyx265 at gmail.com>
date: Tue May 12 16:02:08 2015 -0700
description:
asm: interp_4tap_vert_pp sse2
This replaces the C code for 48x64, 64x16, 64x32, 64x48 and 64x64.
64-bit build:
./test/TestBench --testbench interp | grep vpp | grep "\[48x"
chroma_vpp[48x64] 8.18x 100417.78 821052.12
./test/TestBench --testbench interp | grep vpp | grep "\[64x"
chroma_vpp[64x64] 8.23x 135533.12 1115817.25
chroma_vpp[64x32] 8.17x 67360.61 550516.25
chroma_vpp[64x48] 8.29x 100938.14 837010.75
chroma_vpp[64x16] 8.25x 33651.41 277675.75
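Both changesets swap out the generic C primitive for SSE2. As orientation only, here is a minimal sketch of what that scalar 4-tap vertical pp filter computes, assuming the 8-bit pixel path; the function name, coefficient argument and clipping style are illustrative, not the exact x265 reference code.

/* Hedged sketch of the scalar 4-tap vertical pp filter being replaced. */
#include <stdint.h>

static void interp_4tap_vert_pp_ref(const uint8_t* src, intptr_t srcStride,
                                    uint8_t* dst, intptr_t dstStride,
                                    const int16_t coeff[4], int width, int height)
{
    src -= srcStride;                       /* taps cover rows y-1 .. y+2, hence "sub r0, r1" in the asm */
    for (int y = 0; y < height; y++)
    {
        for (int x = 0; x < width; x++)
        {
            int sum = src[x]                 * coeff[0]
                    + src[x + 1 * srcStride] * coeff[1]
                    + src[x + 2 * srcStride] * coeff[2]
                    + src[x + 3 * srcStride] * coeff[3];
            int val = (sum + 32) >> 6;      /* pw_32 rounding and psraw 6 in the asm */
            dst[x]  = (uint8_t)(val < 0 ? 0 : (val > 255 ? 255 : val));
        }
        src += srcStride;
        dst += dstStride;
    }
}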
diffstat:
source/common/x86/asm-primitives.cpp | 18 ++
source/common/x86/ipfilter8.asm | 293 +++++++++++++++++++++++++++++++++++
source/common/x86/ipfilter8.h | 11 +
3 files changed, 322 insertions(+), 0 deletions(-)
diffs (truncated from 366 to 300 lines):
diff -r 6a8b7e352136 -r 7f01a6dd81d9 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp Tue May 12 10:45:38 2015 -0500
+++ b/source/common/x86/asm-primitives.cpp Tue May 12 16:02:08 2015 -0700
@@ -1419,6 +1419,10 @@ void setupAssemblyPrimitives(EncoderPrim
p.chroma[X265_CSP_I420].pu[CHROMA_420_16x16].filter_vpp = x265_interp_4tap_vert_pp_16x16_sse2;
p.chroma[X265_CSP_I420].pu[CHROMA_420_16x32].filter_vpp = x265_interp_4tap_vert_pp_16x32_sse2;
p.chroma[X265_CSP_I420].pu[CHROMA_420_24x32].filter_vpp = x265_interp_4tap_vert_pp_24x32_sse2;
+ p.chroma[X265_CSP_I420].pu[CHROMA_420_32x8].filter_vpp = x265_interp_4tap_vert_pp_32x8_sse2;
+ p.chroma[X265_CSP_I420].pu[CHROMA_420_32x16].filter_vpp = x265_interp_4tap_vert_pp_32x16_sse2;
+ p.chroma[X265_CSP_I420].pu[CHROMA_420_32x24].filter_vpp = x265_interp_4tap_vert_pp_32x24_sse2;
+ p.chroma[X265_CSP_I420].pu[CHROMA_420_32x32].filter_vpp = x265_interp_4tap_vert_pp_32x32_sse2;
p.chroma[X265_CSP_I422].pu[CHROMA_422_6x16].filter_vpp = x265_interp_4tap_vert_pp_6x16_sse2;
p.chroma[X265_CSP_I422].pu[CHROMA_422_8x4].filter_vpp = x265_interp_4tap_vert_pp_8x4_sse2;
p.chroma[X265_CSP_I422].pu[CHROMA_422_8x4].filter_vpp = x265_interp_4tap_vert_pp_8x4_sse2;
@@ -1434,6 +1438,10 @@ void setupAssemblyPrimitives(EncoderPrim
p.chroma[X265_CSP_I422].pu[CHROMA_422_16x32].filter_vpp = x265_interp_4tap_vert_pp_16x32_sse2;
p.chroma[X265_CSP_I422].pu[CHROMA_422_16x64].filter_vpp = x265_interp_4tap_vert_pp_16x64_sse2;
p.chroma[X265_CSP_I422].pu[CHROMA_422_24x64].filter_vpp = x265_interp_4tap_vert_pp_24x64_sse2;
+ p.chroma[X265_CSP_I422].pu[CHROMA_422_32x16].filter_vpp = x265_interp_4tap_vert_pp_32x16_sse2;
+ p.chroma[X265_CSP_I422].pu[CHROMA_422_32x32].filter_vpp = x265_interp_4tap_vert_pp_32x32_sse2;
+ p.chroma[X265_CSP_I422].pu[CHROMA_422_32x48].filter_vpp = x265_interp_4tap_vert_pp_32x48_sse2;
+ p.chroma[X265_CSP_I422].pu[CHROMA_422_32x64].filter_vpp = x265_interp_4tap_vert_pp_32x64_sse2;
p.chroma[X265_CSP_I444].pu[LUMA_8x4].filter_vpp = x265_interp_4tap_vert_pp_8x4_sse2;
p.chroma[X265_CSP_I444].pu[LUMA_8x8].filter_vpp = x265_interp_4tap_vert_pp_8x8_sse2;
p.chroma[X265_CSP_I444].pu[LUMA_8x16].filter_vpp = x265_interp_4tap_vert_pp_8x16_sse2;
@@ -1446,6 +1454,16 @@ void setupAssemblyPrimitives(EncoderPrim
p.chroma[X265_CSP_I444].pu[LUMA_16x32].filter_vpp = x265_interp_4tap_vert_pp_16x32_sse2;
p.chroma[X265_CSP_I444].pu[LUMA_16x64].filter_vpp = x265_interp_4tap_vert_pp_16x64_sse2;
p.chroma[X265_CSP_I444].pu[LUMA_24x32].filter_vpp = x265_interp_4tap_vert_pp_24x32_sse2;
+ p.chroma[X265_CSP_I444].pu[LUMA_32x8].filter_vpp = x265_interp_4tap_vert_pp_32x8_sse2;
+ p.chroma[X265_CSP_I444].pu[LUMA_32x16].filter_vpp = x265_interp_4tap_vert_pp_32x16_sse2;
+ p.chroma[X265_CSP_I444].pu[LUMA_32x24].filter_vpp = x265_interp_4tap_vert_pp_32x24_sse2;
+ p.chroma[X265_CSP_I444].pu[LUMA_32x32].filter_vpp = x265_interp_4tap_vert_pp_32x32_sse2;
+ p.chroma[X265_CSP_I444].pu[LUMA_32x64].filter_vpp = x265_interp_4tap_vert_pp_32x64_sse2;
+ p.chroma[X265_CSP_I444].pu[LUMA_48x64].filter_vpp = x265_interp_4tap_vert_pp_48x64_sse2;
+ p.chroma[X265_CSP_I444].pu[LUMA_64x16].filter_vpp = x265_interp_4tap_vert_pp_64x16_sse2;
+ p.chroma[X265_CSP_I444].pu[LUMA_64x32].filter_vpp = x265_interp_4tap_vert_pp_64x32_sse2;
+ p.chroma[X265_CSP_I444].pu[LUMA_64x48].filter_vpp = x265_interp_4tap_vert_pp_64x48_sse2;
+ p.chroma[X265_CSP_I444].pu[LUMA_64x64].filter_vpp = x265_interp_4tap_vert_pp_64x64_sse2;
#endif
ALL_LUMA_PU(luma_hpp, interp_8tap_horiz_pp, sse2);
diff -r 6a8b7e352136 -r 7f01a6dd81d9 source/common/x86/ipfilter8.asm
--- a/source/common/x86/ipfilter8.asm Tue May 12 10:45:38 2015 -0500
+++ b/source/common/x86/ipfilter8.asm Tue May 12 16:02:08 2015 -0700
@@ -2177,6 +2177,299 @@ cglobal interp_4tap_vert_pp_24x%1, 4, 6,
FILTER_V4_W24_sse2 64
%endif
+;-----------------------------------------------------------------------------
+; void interp_4tap_vert_pp_32xN(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------
+%macro FILTER_V4_W32_sse2 1
+INIT_XMM sse2
+cglobal interp_4tap_vert_pp_32x%1, 4, 6, 10
+ mov r4d, r4m
+ sub r0, r1
+ shl r4d, 5
+ pxor m9, m9
+ mova m6, [pw_32]
+
+%ifdef PIC
+ lea r5, [tab_ChromaCoeffV]
+ mova m1, [r5 + r4]
+ mova m0, [r5 + r4 + 16]
+%else
+ mova m1, [tab_ChromaCoeffV + r4]
+ mova m0, [tab_ChromaCoeffV + r4 + 16]
+%endif
+
+ mov r4d, %1
+
+.loop:
+ movu m2, [r0]
+ movu m3, [r0 + r1]
+
+ punpcklbw m4, m2, m3
+ punpckhbw m2, m3
+
+ movhlps m8, m4
+ punpcklbw m4, m9
+ punpcklbw m8, m9
+ pmaddwd m4, m1
+ pmaddwd m8, m1
+ packssdw m4, m8
+
+ movhlps m8, m2
+ punpcklbw m2, m9
+ punpcklbw m8, m9
+ pmaddwd m2, m1
+ pmaddwd m8, m1
+ packssdw m2, m8
+
+ lea r5, [r0 + 2 * r1]
+ movu m3, [r5]
+ movu m5, [r5 + r1]
+
+ punpcklbw m7, m3, m5
+ punpckhbw m3, m5
+
+ movhlps m8, m7
+ punpcklbw m7, m9
+ punpcklbw m8, m9
+ pmaddwd m7, m0
+ pmaddwd m8, m0
+ packssdw m7, m8
+
+ movhlps m8, m3
+ punpcklbw m3, m9
+ punpcklbw m8, m9
+ pmaddwd m3, m0
+ pmaddwd m8, m0
+ packssdw m3, m8
+
+ paddw m4, m7
+ paddw m2, m3
+
+ paddw m4, m6
+ psraw m4, 6
+ paddw m2, m6
+ psraw m2, 6
+
+ packuswb m4, m2
+ movu [r2], m4
+
+ movu m2, [r0 + 16]
+ movu m3, [r0 + r1 + 16]
+
+ punpcklbw m4, m2, m3
+ punpckhbw m2, m3
+
+ movhlps m8, m4
+ punpcklbw m4, m9
+ punpcklbw m8, m9
+ pmaddwd m4, m1
+ pmaddwd m8, m1
+ packssdw m4, m8
+
+ movhlps m8, m2
+ punpcklbw m2, m9
+ punpcklbw m8, m9
+ pmaddwd m2, m1
+ pmaddwd m8, m1
+ packssdw m2, m8
+
+ movu m3, [r5 + 16]
+ movu m5, [r5 + r1 + 16]
+
+ punpcklbw m7, m3, m5
+ punpckhbw m3, m5
+
+ movhlps m8, m7
+ punpcklbw m7, m9
+ punpcklbw m8, m9
+ pmaddwd m7, m0
+ pmaddwd m8, m0
+ packssdw m7, m8
+
+ movhlps m8, m3
+ punpcklbw m3, m9
+ punpcklbw m8, m9
+ pmaddwd m3, m0
+ pmaddwd m8, m0
+ packssdw m3, m8
+
+ paddw m4, m7
+ paddw m2, m3
+
+ paddw m4, m6
+ psraw m4, 6
+ paddw m2, m6
+ psraw m2, 6
+
+ packuswb m4, m2
+ movu [r2 + 16], m4
+
+ lea r0, [r0 + r1]
+ lea r2, [r2 + r3]
+ dec r4
+ jnz .loop
+ RET
+
+%endmacro
+
+%if ARCH_X86_64
+ FILTER_V4_W32_sse2 8
+ FILTER_V4_W32_sse2 16
+ FILTER_V4_W32_sse2 24
+ FILTER_V4_W32_sse2 32
+
+ FILTER_V4_W32_sse2 48
+ FILTER_V4_W32_sse2 64
+%endif
+
+;-----------------------------------------------------------------------------
+; void interp_4tap_vert_pp_%1x%2(pixel *src, intptr_t srcStride, pixel *dst, intptr_t dstStride, int coeffIdx)
+;-----------------------------------------------------------------------------
+%macro FILTER_V4_W16n_H2_sse2 2
+INIT_XMM sse2
+cglobal interp_4tap_vert_pp_%1x%2, 4, 7, 11
+
+ mov r4d, r4m
+ sub r0, r1
+ shl r4d, 5
+ pxor m9, m9
+ mova m7, [pw_32]
+
+%ifdef PIC
+ lea r5, [tab_ChromaCoeffV]
+ mova m1, [r5 + r4]
+ mova m0, [r5 + r4 + 16]
+%else
+ mova m1, [tab_ChromaCoeffV + r4]
+ mova m0, [tab_ChromaCoeffV + r4 + 16]
+%endif
+
+ mov r4d, %2/2
+
+.loop:
+
+ mov r6d, %1/16
+
+.loopW:
+
+ movu m2, [r0]
+ movu m3, [r0 + r1]
+
+ punpcklbw m4, m2, m3
+ punpckhbw m2, m3
+
+ movhlps m8, m4
+ punpcklbw m4, m9
+ punpcklbw m8, m9
+ pmaddwd m4, m1
+ pmaddwd m8, m1
+ packssdw m4, m8
+
+ movhlps m8, m2
+ punpcklbw m2, m9
+ punpcklbw m8, m9
+ pmaddwd m2, m1
+ pmaddwd m8, m1
+ packssdw m2, m8
+
+ lea r5, [r0 + 2 * r1]
+ movu m5, [r5]
+ movu m6, [r5 + r1]
+
+ punpckhbw m10, m5, m6
+ movhlps m8, m10
+ punpcklbw m10, m9
+ punpcklbw m8, m9
+ pmaddwd m10, m0
+ pmaddwd m8, m0
+ packssdw m10, m8
+ paddw m2, m10
+
+ punpcklbw m10, m5, m6
+ movhlps m8, m10
+ punpcklbw m10, m9
+ punpcklbw m8, m9
+ pmaddwd m10, m0
+ pmaddwd m8, m0
+ packssdw m10, m8
+ paddw m4, m10
+
+ paddw m4, m7
+ psraw m4, 6
+ paddw m2, m7
+ psraw m2, 6
+
+ packuswb m4, m2
+ movu [r2], m4
+
+ punpcklbw m4, m3, m5
+ punpckhbw m3, m5
+
+ movhlps m8, m4
+ punpcklbw m4, m9
+ punpcklbw m8, m9
+ pmaddwd m4, m1
+ pmaddwd m8, m1
+ packssdw m4, m8
+
+ movhlps m8, m3
+ punpcklbw m3, m9
+ punpcklbw m8, m9
+ pmaddwd m3, m1
+ pmaddwd m8, m1
+ packssdw m3, m8
+
+ movu m5, [r5 + 2 * r1]
+
+ punpcklbw m2, m6, m5
+ punpckhbw m6, m5
+
+ movhlps m8, m2
+ punpcklbw m2, m9
+ punpcklbw m8, m9
+ pmaddwd m2, m0
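A note on the SSE2 strategy visible in both macros above: without pmaddubsw (SSSE3), each group of 8 pixels is handled by interleaving bytes from two adjacent rows, zero-extending them against the zeroed m9 register, multiplying with one pair of taps via pmaddwd, and repacking with packssdw. A hedged C-intrinsics sketch of that single step follows; the helper name and arguments are mine for illustration only. The real code performs it twice per output (once with the (c0,c1) pair in m1, once with (c2,c3) in m0), adds the two partial sums, rounds with pw_32, shifts right by 6 and packs to bytes with packuswb.

#include <emmintrin.h>   /* SSE2 */

/* Illustrative helper, not part of the patch: partial sums a*c0 + b*c1 for
   the first 8 pixels of rows A and B. coeffPair01 holds (c0,c1) repeated
   as eight 16-bit words, matching the tab_ChromaCoeffV layout used above. */
static __m128i madd_two_rows_lo8(__m128i rowA, __m128i rowB, __m128i coeffPair01)
{
    const __m128i zero = _mm_setzero_si128();     /* pxor m9, m9 */
    __m128i ab = _mm_unpacklo_epi8(rowA, rowB);   /* punpcklbw: a0 b0 a1 b1 ... a7 b7 */
    __m128i lo = _mm_unpacklo_epi8(ab, zero);     /* words a0,b0,a1,b1,a2,b2,a3,b3 */
    __m128i hi = _mm_unpackhi_epi8(ab, zero);     /* same result the asm gets via movhlps + punpcklbw */
    lo = _mm_madd_epi16(lo, coeffPair01);         /* pmaddwd: a_i*c0 + b_i*c1 per 32-bit lane */
    hi = _mm_madd_epi16(hi, coeffPair01);
    return _mm_packs_epi32(lo, hi);               /* packssdw: eight 16-bit partial sums */
}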