[x265] [PATCH] asm: 10bpp code for blockcopy_pp_16xN
murugan at multicorewareinc.com
Tue Dec 10 08:16:36 CET 2013
# HG changeset patch
# User Murugan Vairavel <murugan at multicorewareinc.com>
# Date 1386659704 -19800
# Tue Dec 10 12:45:04 2013 +0530
# Node ID 093495408a1efc297ca292d6be182e3330bfb38d
# Parent 2959a996f8e562b867b7961cc7d66ab87633e71e
asm: 10bpp code for blockcopy_pp_16xN
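
For reference, the kernels below perform a plain 16-pixel-wide block copy. A scalar sketch of the same operation for the HIGH_BIT_DEPTH case (pixel is 16 bits wide; strides are given in pixels, as in the asm prototypes) is shown here; the function name is illustrative, not x265's C primitive:

#include <stdint.h>
#include <string.h>

typedef uint16_t pixel;   /* HIGH_BIT_DEPTH builds use 16-bit pixels */

/* Scalar equivalent of blockcopy_pp_16xN: copy a 16-pixel-wide block of
 * `height` rows; dstStride/srcStride are in pixels, as in the asm. */
static void blockcopy_pp_16xN_c(pixel *dst, intptr_t dstStride,
                                const pixel *src, intptr_t srcStride,
                                int height)
{
    for (int y = 0; y < height; y++)
    {
        memcpy(dst, src, 16 * sizeof(pixel));  /* 32 bytes per row at 10bpp */
        dst += dstStride;
        src += srcStride;
    }
}

int main(void)
{
    pixel src[16 * 4], dst[16 * 4];
    for (int i = 0; i < 16 * 4; i++) src[i] = (pixel)i;
    blockcopy_pp_16xN_c(dst, 16, src, 16, 4);  /* the 16x4 case */
    return 0;
}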
diff -r 2959a996f8e5 -r 093495408a1e source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp Tue Dec 10 12:12:56 2013 +0530
+++ b/source/common/x86/asm-primitives.cpp Tue Dec 10 12:45:04 2013 +0530
@@ -677,6 +677,13 @@
p.chroma[X265_CSP_I420].copy_pp[LUMA_16x16] = x265_blockcopy_pp_8x8_sse2;
p.chroma[X265_CSP_I420].copy_pp[LUMA_16x32] = x265_blockcopy_pp_8x16_sse2;
p.chroma[X265_CSP_I420].copy_pp[LUMA_16x64] = x265_blockcopy_pp_8x32_sse2;
+ p.chroma[X265_CSP_I420].copy_pp[LUMA_24x32] = x265_blockcopy_pp_12x16_sse2;
+ p.chroma[X265_CSP_I420].copy_pp[LUMA_32x8] = x265_blockcopy_pp_16x4_sse2;
+ p.chroma[X265_CSP_I420].copy_pp[LUMA_32x16] = x265_blockcopy_pp_16x8_sse2;
+ p.chroma[X265_CSP_I420].copy_pp[LUMA_32x24] = x265_blockcopy_pp_16x12_sse2;
+ p.chroma[X265_CSP_I420].copy_pp[LUMA_32x32] = x265_blockcopy_pp_16x16_sse2;
+ p.chroma[X265_CSP_I420].copy_pp[LUMA_32x64] = x265_blockcopy_pp_16x32_sse2;
+ p.luma_copy_pp[LUMA_16x64] = x265_blockcopy_pp_16x64_sse2;
}
if (cpuMask & X265_CPU_SSSE3)
{
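
The chroma mappings above follow from I420 subsampling: each chroma block is half the luma partition in both dimensions, so the 32xN chroma copies can reuse the 16x(N/2) luma kernels, and 24x32 reuses 12x16. A small standalone sketch of that relationship, with an illustrative helper name:

#include <assert.h>

/* I420 chroma blocks are half the luma partition in each direction, which is
 * why each chroma LUMA_WxH slot above points at the (W/2)x(H/2) copy kernel. */
static void i420ChromaDims(int lumaW, int lumaH, int *chromaW, int *chromaH)
{
    *chromaW = lumaW >> 1;   /* 32 -> 16, 24 -> 12, ... */
    *chromaH = lumaH >> 1;   /* 64 -> 32, 32 -> 16, ... */
}

int main(void)
{
    int cw, ch;
    i420ChromaDims(32, 64, &cw, &ch);
    assert(cw == 16 && ch == 32);  /* LUMA_32x64 -> x265_blockcopy_pp_16x32_sse2 */
    return 0;
}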
diff -r 2959a996f8e5 -r 093495408a1e source/common/x86/blockcopy8.asm
--- a/source/common/x86/blockcopy8.asm Tue Dec 10 12:12:56 2013 +0530
+++ b/source/common/x86/blockcopy8.asm Tue Dec 10 12:45:04 2013 +0530
@@ -648,152 +648,160 @@
;-----------------------------------------------------------------------------
; void blockcopy_pp_16x4(pixel *dest, intptr_t deststride, pixel *src, intptr_t srcstride)
;-----------------------------------------------------------------------------
+%macro BLOCKCOPY_PP_W16_H4 2
INIT_XMM sse2
-cglobal blockcopy_pp_16x4, 4, 4, 4, dest, deststride, src, srcstride
-
-movu m0, [r2]
-movu m1, [r2 + r3]
-movu m2, [r2 + 2 * r3]
-lea r3, [r3 + r3 * 2]
-movu m3, [r2 + r3]
-
-movu [r0], m0
-movu [r0 + r1], m1
-movu [r0 + 2 * r1], m2
-lea r1, [r1 + 2 * r1]
-movu [r0 + r1], m3
-
-RET
-
-;-----------------------------------------------------------------------------
-; void blockcopy_pp_16x8(pixel *dest, intptr_t deststride, pixel *src, intptr_t srcstride)
-;-----------------------------------------------------------------------------
-INIT_XMM sse2
-cglobal blockcopy_pp_16x8, 4, 7, 8, dest, deststride, src, srcstride
-
-movu m0, [r2]
-movu m1, [r2 + r3]
-movu m2, [r2 + 2 * r3]
-lea r5, [r2 + 2 * r3]
-movu m3, [r5 + r3]
-
-movu m4, [r5 + 2 * r3]
-lea r5, [r5 + 2 * r3]
-movu m5, [r5 + r3]
-movu m6, [r5 + 2 * r3]
-lea r5, [r5 + 2 * r3]
-movu m7, [r5 + r3]
-
-movu [r0], m0
-movu [r0 + r1], m1
-movu [r0 + 2 * r1], m2
-lea r6, [r0 + 2 * r1]
-movu [r6 + r1], m3
-
-movu [r6 + 2 * r1], m4
-lea r6, [r6 + 2 * r1]
-movu [r6 + r1], m5
-movu [r6 + 2 * r1], m6
-lea r6, [r6 + 2 * r1]
-movu [r6 + r1], m7
-
-RET
-
-;-----------------------------------------------------------------------------
-; void blockcopy_pp_16x12(pixel *dest, intptr_t deststride, pixel *src, intptr_t srcstride)
-;-----------------------------------------------------------------------------
-INIT_XMM sse2
-cglobal blockcopy_pp_16x12, 4, 7, 8, dest, deststride, src, srcstride
-
-movu m0, [r2]
-movu m1, [r2 + r3]
-movu m2, [r2 + 2 * r3]
-lea r5, [r2 + 2 * r3]
-movu m3, [r5 + r3]
-
-movu m4, [r5 + 2 * r3]
-lea r5, [r5 + 2 * r3]
-movu m5, [r5 + r3]
-movu m6, [r5 + 2 * r3]
-lea r5, [r5 + 2 * r3]
-movu m7, [r5 + r3]
-
-movu [r0], m0
-movu [r0 + r1], m1
-movu [r0 + 2 * r1], m2
-lea r6, [r0 + 2 * r1]
-movu [r6 + r1], m3
-
-movu [r6 + 2 * r1], m4
-lea r6, [r6 + 2 * r1]
-movu [r6 + r1], m5
-movu [r6 + 2 * r1], m6
-lea r6, [r6 + 2 * r1]
-movu [r6 + r1], m7
-
-lea r0, [r0 + 8 * r1]
-lea r2, [r2 + 8 * r3]
-
-movu m0, [r2]
-movu m1, [r2 + r3]
-movu m2, [r2 + 2 * r3]
-lea r3, [r3 + r3 * 2]
-movu m3, [r2 + r3]
-
-movu [r0], m0
-movu [r0 + r1], m1
-movu [r0 + 2 * r1], m2
-lea r1, [r1 + 2 * r1]
-movu [r0 + r1], m3
-
-RET
+cglobal blockcopy_pp_%1x%2, 4, 5, 4, dest, deststride, src, srcstride
+ mov r4d, %2/4
+%if HIGH_BIT_DEPTH
+ add r1, r1
+ add r3, r3
+.loop
+ movu m0, [r2]
+ movu m1, [r2 + 16]
+ movu m2, [r2 + r3]
+ movu m3, [r2 + r3 + 16]
+ lea r2, [r2 + 2 * r3]
+
+ movu [r0], m0
+ movu [r0 + 16], m1
+ movu [r0 + r1], m2
+ movu [r0 + r1 + 16], m3
+
+ lea r0, [r0 + 2 * r1]
+ movu m0, [r2]
+ movu m1, [r2 + 16]
+ movu m2, [r2 + r3]
+ movu m3, [r2 + r3 + 16]
+
+ movu [r0], m0
+ movu [r0 + 16], m1
+ movu [r0 + r1], m2
+ movu [r0 + r1 + 16], m3
+
+ dec r4d
+ lea r0, [r0 + 2 * r1]
+ lea r2, [r2 + 2 * r3]
+ jnz .loop
+%else
+.loop
+ movu m0, [r2]
+ movu m1, [r2 + r3]
+ lea r2, [r2 + 2 * r3]
+ movu m2, [r2]
+ movu m3, [r2 + r3]
+
+ movu [r0], m0
+ movu [r0 + r1], m1
+ lea r0, [r0 + 2 * r1]
+ movu [r0], m2
+ movu [r0 + r1], m3
+
+ dec r4d
+ lea r0, [r0 + 2 * r1]
+ lea r2, [r2 + 2 * r3]
+ jnz .loop
+%endif
+ RET
+%endmacro
+
+BLOCKCOPY_PP_W16_H4 16, 4
+BLOCKCOPY_PP_W16_H4 16, 12
;-----------------------------------------------------------------------------
; void blockcopy_pp_%1x%2(pixel *dest, intptr_t deststride, pixel *src, intptr_t srcstride)
;-----------------------------------------------------------------------------
%macro BLOCKCOPY_PP_W16_H8 2
INIT_XMM sse2
-cglobal blockcopy_pp_%1x%2, 4, 7, 8, dest, deststride, src, srcstride
-
-
-mov r4d, %2
-
+cglobal blockcopy_pp_%1x%2, 4, 5, 6, dest, deststride, src, srcstride
+ mov r4d, %2/8
+%if HIGH_BIT_DEPTH
+ add r1, r1
+ add r3, r3
.loop
- movu m0, [r2]
- movu m1, [r2 + r3]
- movu m2, [r2 + 2 * r3]
- lea r5, [r2 + 2 * r3]
- movu m3, [r5 + r3]
-
- movu m4, [r5 + 2 * r3]
- lea r5, [r5 + 2 * r3]
- movu m5, [r5 + r3]
- movu m6, [r5 + 2 * r3]
- lea r5, [r5 + 2 * r3]
- movu m7, [r5 + r3]
-
- movu [r0], m0
- movu [r0 + r1], m1
- movu [r0 + 2 * r1], m2
- lea r6, [r0 + 2 * r1]
- movu [r6 + r1], m3
-
- movu [r6 + 2 * r1], m4
- lea r6, [r6 + 2 * r1]
- movu [r6 + r1], m5
- movu [r6 + 2 * r1], m6
- lea r6, [r6 + 2 * r1]
- movu [r6 + r1], m7
-
- lea r0, [r0 + 8 * r1]
- lea r2, [r2 + 8 * r3]
-
- sub r4d, 8
- jnz .loop
-
-RET
+ movu m0, [r2]
+ movu m1, [r2 + 16]
+ movu m2, [r2 + r3]
+ movu m3, [r2 + r3 + 16]
+ lea r2, [r2 + 2 * r3]
+ movu m4, [r2]
+ movu m5, [r2 + 16]
+
+ movu [r0], m0
+ movu [r0 + 16], m1
+ movu [r0 + r1], m2
+ movu [r0 + r1 + 16], m3
+ lea r0, [r0 + 2 * r1]
+ movu [r0], m4
+ movu [r0 + 16], m5
+
+ movu m0, [r2 + r3]
+ movu m1, [r2 + r3 + 16]
+ lea r2, [r2 + 2 * r3]
+ movu m2, [r2]
+ movu m3, [r2 + 16]
+ movu m4, [r2 + r3]
+ movu m5, [r2 + r3 + 16]
+ lea r2, [r2 + 2 * r3]
+
+ movu [r0 + r1], m0
+ movu [r0 + r1 + 16], m1
+ lea r0, [r0 + 2 * r1]
+ movu [r0], m2
+ movu [r0 + 16], m3
+ movu [r0 + r1], m4
+ movu [r0 + r1 + 16], m5
+ lea r0, [r0 + 2 * r1]
+
+ movu m0, [r2]
+ movu m1, [r2 + 16]
+ movu m2, [r2 + r3]
+ movu m3, [r2 + r3 + 16]
+
+ movu [r0], m0
+ movu [r0 + 16], m1
+ movu [r0 + r1], m2
+ movu [r0 + r1 + 16], m3
+
+ dec r4d
+ lea r0, [r0 + 2 * r1]
+ lea r2, [r2 + 2 * r3]
+ jnz .loop
+%else
+.loop
+ movu m0, [r2]
+ movu m1, [r2 + r3]
+ lea r2, [r2 + 2 * r3]
+ movu m2, [r2]
+ movu m3, [r2 + r3]
+ lea r2, [r2 + 2 * r3]
+ movu m4, [r2]
+ movu m5, [r2 + r3]
+ lea r2, [r2 + 2 * r3]
+
+ movu [r0], m0
+ movu [r0 + r1], m1
+ lea r0, [r0 + 2 * r1]
+ movu [r0], m2
+ movu [r0 + r1], m3
+ lea r0, [r0 + 2 * r1]
+ movu [r0], m4
+ movu [r0 + r1], m5
+ lea r0, [r0 + 2 * r1]
+
+ movu m0, [r2]
+ movu m1, [r2 + r3]
+ movu [r0], m0
+ movu [r0 + r1], m1
+
+ dec r4d
+ lea r0, [r0 + 2 * r1]
+ lea r2, [r2 + 2 * r3]
+ jnz .loop
+%endif
+ RET
%endmacro
+BLOCKCOPY_PP_W16_H8 16, 8
BLOCKCOPY_PP_W16_H8 16, 16
BLOCKCOPY_PP_W16_H8 16, 32
BLOCKCOPY_PP_W16_H8 16, 64
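
One detail worth calling out in the HIGH_BIT_DEPTH branches above: the stride arguments arrive in pixels, so the code doubles r1 and r3 up front (add r1, r1 / add r3, r3) before using them as byte offsets, and each 16-pixel row then spans 32 bytes, which is why every row is moved with a pair of unaligned XMM accesses ([r2] and [r2 + 16]). A small standalone sketch of that arithmetic (not x265 code):

#include <assert.h>
#include <stdint.h>

int main(void)
{
    const intptr_t strideInPixels = 64;  /* example stride */
    const intptr_t strideInBytes  = strideInPixels * (intptr_t)sizeof(uint16_t);

    assert(strideInBytes == strideInPixels + strideInPixels);  /* the add r, r step */
    assert(16 * sizeof(uint16_t) == 32);                       /* one row: two 16-byte movu */
    return 0;
}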