[x265] [PATCH] asm: 10bpp code for transpose 64x64
Murugan Vairavel <murugan at multicorewareinc.com>
Wed Dec 4 08:23:28 CET 2013
# HG changeset patch
# User Murugan Vairavel <murugan at multicorewareinc.com>
# Date 1386141799 -19800
# Wed Dec 04 12:53:19 2013 +0530
# Node ID e1e18d9cd5b0fa7d14c655819bd347a5c8accbde
# Parent ee1221fac033355129128ba5f847910e3ed49047
asm: 10bpp code for transpose 64x64
diff -r ee1221fac033 -r e1e18d9cd5b0 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp Wed Dec 04 12:33:17 2013 +0530
+++ b/source/common/x86/asm-primitives.cpp Wed Dec 04 12:53:19 2013 +0530
@@ -530,6 +530,7 @@
p.transpose[BLOCK_8x8] = x265_transpose8_sse2;
p.transpose[BLOCK_16x16] = x265_transpose16_sse2;
p.transpose[BLOCK_32x32] = x265_transpose32_sse2;
+ p.transpose[BLOCK_64x64] = x265_transpose64_sse2;
p.ssim_4x4x2_core = x265_pixel_ssim_4x4x2_core_sse2;
PIXEL_AVG(sse2);
diff -r ee1221fac033 -r e1e18d9cd5b0 source/common/x86/pixel-util8.asm
--- a/source/common/x86/pixel-util8.asm Wed Dec 04 12:33:17 2013 +0530
+++ b/source/common/x86/pixel-util8.asm Wed Dec 04 12:53:19 2013 +0530
@@ -1133,8 +1133,275 @@
; void transpose_64x64(pixel *dst, pixel *src, intptr_t stride)
;-----------------------------------------------------------------
INIT_XMM sse2
+%if HIGH_BIT_DEPTH
+cglobal transpose64, 3, 7, 4, dest, src, stride
+ add r2, r2
+ mov r3, r0
+ mov r4, r1
+ mov r5, 128
+ mov r6, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 16]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 32]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 48]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 64]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 80]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 96]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 112]
+ mov r3, r0
+ call transpose8_internal
+
+ lea r1, [r4 + 16]
+ lea r0, [r6 + 8 * 128]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 8 * 128 + 16]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 8 * 128 + 32]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 8 * 128 + 48]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 8 * 128 + 64]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 8 * 128 + 80]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 8 * 128 + 96]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 8 * 128 + 112]
+ mov r3, r0
+ call transpose8_internal
+
+ lea r1, [r4 + 32]
+ lea r0, [r6 + 16 * 128]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 16 * 128 + 16]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 16 * 128 + 32]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 16 * 128 + 48]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 16 * 128 + 64]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 16 * 128 + 80]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 16 * 128 + 96]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 16 * 128 + 112]
+ mov r3, r0
+ call transpose8_internal
+
+ lea r1, [r4 + 48]
+ lea r0, [r6 + 24 * 128]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 24 * 128 + 16]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 24 * 128 + 32]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 24 * 128 + 48]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 24 * 128 + 64]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 24 * 128 + 80]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 24 * 128 + 96]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 24 * 128 + 112]
+ mov r3, r0
+ call transpose8_internal
+
+ lea r1, [r4 + 64]
+ lea r0, [r6 + 32 * 128]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 32 * 128 + 16]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 32 * 128 + 32]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 32 * 128 + 48]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 32 * 128 + 64]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 32 * 128 + 80]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 32 * 128 + 96]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 32 * 128 + 112]
+ mov r3, r0
+ call transpose8_internal
+
+ lea r1, [r4 + 80]
+ lea r0, [r6 + 40 * 128]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 40 * 128 + 16]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 40 * 128 + 32]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 40 * 128 + 48]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 40 * 128 + 64]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 40 * 128 + 80]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 40 * 128 + 96]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 40 * 128 + 112]
+ mov r3, r0
+ call transpose8_internal
+
+ lea r1, [r4 + 96]
+ lea r0, [r6 + 48 * 128]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 48 * 128 + 16]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 48 * 128 + 32]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 48 * 128 + 48]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 48 * 128 + 64]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 48 * 128 + 80]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 48 * 128 + 96]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 48 * 128 + 112]
+ mov r3, r0
+ call transpose8_internal
+
+ lea r1, [r4 + 112]
+ lea r0, [r6 + 56 * 128]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 56 * 128 + 16]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 56 * 128 + 32]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 56 * 128 + 48]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 56 * 128 + 64]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 56 * 128 + 80]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 56 * 128 + 96]
+ mov r3, r0
+ call transpose8_internal
+ lea r1, [r1 - 8 + 2 * r2]
+ lea r0, [r6 + 56 * 128 + 112]
+ mov r3, r0
+ call transpose8_internal
+%else
cglobal transpose64, 3, 7, 8, dest, src, stride
-
mov r3, r0
mov r4, r1
mov r5, r0
@@ -1203,7 +1470,7 @@
lea r0, [r3 + 48 * 64 + 48]
mov r5, r0
call transpose16_internal
-
+%endif
RET
;=============================================================================
More information about the x265-devel
mailing list