[x265] [PATCH] asm: assembly code for pixel_sa8d_32x32
yuvaraj at multicorewareinc.com
Wed Nov 20 10:39:23 CET 2013
# HG changeset patch
# User Yuvaraj Venkatesh <yuvaraj at multicorewareinc.com>
# Date 1384934763 -19800
# Wed Nov 20 13:36:03 2013 +0530
# Node ID 8101c173682f78bec756fc168530e79224db1e01
# Parent 108ddc9e5c6b15e758ccbf08a0e923cbb7b28b5e
asm: assembly code for pixel_sa8d_32x32
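
Background: sa8d is the Hadamard-transform difference metric on 8x8 blocks
(the 8x8 analogue of satd); larger partitions are built by summing 8x8
transforms. Until now sa8d[BLOCK_32x32] and sa8d_inter[LUMA_32x32] went
through the generic cmp<32, 32, 16, 16, ...> template, which calls the
16x16 primitive on each of the four 16x16 blocks and sums the rounded
results; this patch provides a dedicated pixel_sa8d_32x32 that computes
the same value in one call.

For reference, a scalar sketch of what is computed (illustrative only, not
part of the patch; the function names are made up). As in the C reference
primitives, the last horizontal butterfly stage is folded into the
absolute-value sum using the identity max(|a+b|, |a-b|) = |a| + |b|. The
assembly's "amax" stage folds one further butterfly the same way, so its
partial sums sit at half scale and it can finish with a single
"add eax, 1 / shr eax, 1" where the C code divides by 4:

    #include <stdint.h>
    #include <stdlib.h>

    /* in-place 4-point Walsh-Hadamard transform */
    static void wht4(int *v)
    {
        int a = v[0] + v[1], b = v[0] - v[1];
        int c = v[2] + v[3], d = v[2] - v[3];
        v[0] = a + c; v[2] = a - c;
        v[1] = b + d; v[3] = b - d;
    }

    /* in-place 8-point Walsh-Hadamard transform (three butterfly passes) */
    static void wht8(int *v)
    {
        for (int len = 1; len < 8; len <<= 1)
            for (int j = 0; j < 8; j += 2 * len)
                for (int k = j; k < j + len; k++)
                {
                    int a = v[k], b = v[k + len];
                    v[k] = a + b;
                    v[k + len] = a - b;
                }
    }

    /* raw coefficient sum for one 8x8 block: full 8-point transform
       vertically, 4-point transforms per half horizontally, with the
       final horizontal stage folded into the |.| sum as described above */
    static int sa8d_8x8_core(const uint8_t *p1, intptr_t s1,
                             const uint8_t *p2, intptr_t s2)
    {
        int d[8][8], col[8], sum = 0;
        for (int i = 0; i < 8; i++)
            for (int j = 0; j < 8; j++)
                d[i][j] = p1[i * s1 + j] - p2[i * s2 + j];
        for (int i = 0; i < 8; i++)
        {
            wht4(&d[i][0]);            /* columns 0-3 */
            wht4(&d[i][4]);            /* columns 4-7 */
        }
        for (int j = 0; j < 8; j++)
        {
            for (int i = 0; i < 8; i++) col[i] = d[i][j];
            wht8(col);
            for (int i = 0; i < 8; i++) sum += abs(col[i]);
        }
        return sum;
    }

    static int sa8d_8x8(const uint8_t *p1, intptr_t s1,
                        const uint8_t *p2, intptr_t s2)
    {
        return (sa8d_8x8_core(p1, s1, p2, s2) + 2) >> 2;
    }

    /* 16x16: the four 8x8 sums stay unrounded, one rounding per block */
    static int sa8d_16x16(const uint8_t *p1, intptr_t s1,
                          const uint8_t *p2, intptr_t s2)
    {
        int sum = sa8d_8x8_core(p1,              s1, p2,              s2)
                + sa8d_8x8_core(p1 + 8,          s1, p2 + 8,          s2)
                + sa8d_8x8_core(p1 + 8 * s1,     s1, p2 + 8 * s2,     s2)
                + sa8d_8x8_core(p1 + 8 * s1 + 8, s1, p2 + 8 * s2 + 8, s2);
        return (sum + 2) >> 2;
    }

    /* 32x32 as both the cmp<> template and the new asm compute it:
       the sum of the four rounded 16x16 results */
    static int sa8d_32x32(const uint8_t *p1, intptr_t s1,
                          const uint8_t *p2, intptr_t s2)
    {
        int sum = 0;
        for (int y = 0; y < 32; y += 16)
            for (int x = 0; x < 32; x += 16)
                sum += sa8d_16x16(p1 + y * s1 + x, s1,
                                  p2 + y * s2 + x, s2);
        return sum;
    }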
diff -r 108ddc9e5c6b -r 8101c173682f source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp Tue Nov 19 23:45:52 2013 -0600
+++ b/source/common/x86/asm-primitives.cpp Wed Nov 20 13:36:03 2013 +0530
@@ -95,9 +95,9 @@
p.sa8d_inter[LUMA_8x32] = cmp<8, 32, 8, 8, x265_pixel_sa8d_8x8_ ## cpu>
#define SA8D_INTER_FROM_BLOCK(cpu) \
SA8D_INTER_FROM_BLOCK8(cpu); \
- p.sa8d[BLOCK_32x32] = cmp<32, 32, 16, 16, x265_pixel_sa8d_16x16_ ## cpu>; \
+ p.sa8d[BLOCK_32x32] = x265_pixel_sa8d_32x32_ ## cpu; \
p.sa8d[BLOCK_64x64] = cmp<64, 64, 16, 16, x265_pixel_sa8d_16x16_ ## cpu>; \
- p.sa8d_inter[LUMA_32x32] = cmp<32, 32, 16, 16, x265_pixel_sa8d_16x16_ ## cpu>; \
+ p.sa8d_inter[LUMA_32x32] = x265_pixel_sa8d_32x32_ ## cpu; \
p.sa8d_inter[LUMA_32x16] = cmp<32, 16, 16, 16, x265_pixel_sa8d_16x16_ ## cpu>; \
p.sa8d_inter[LUMA_16x32] = cmp<16, 32, 16, 16, x265_pixel_sa8d_16x16_ ## cpu>; \
p.sa8d_inter[LUMA_64x64] = cmp<64, 64, 16, 16, x265_pixel_sa8d_16x16_ ## cpu>; \
diff -r 108ddc9e5c6b -r 8101c173682f source/common/x86/pixel-a.asm
--- a/source/common/x86/pixel-a.asm Tue Nov 19 23:45:52 2013 -0600
+++ b/source/common/x86/pixel-a.asm Wed Nov 20 13:36:03 2013 +0530
@@ -3471,55 +3471,7 @@
%endif ; HIGH_BIT_DEPTH
%endmacro
-%macro SA8D 0
-; sse2 doesn't seem to like the horizontal way of doing things
-%define vertical ((notcpuflag(ssse3) || cpuflag(atom)) || HIGH_BIT_DEPTH)
-
-%if ARCH_X86_64
-;-----------------------------------------------------------------------------
-; int pixel_sa8d_8x8( uint8_t *, intptr_t, uint8_t *, intptr_t )
-;-----------------------------------------------------------------------------
-cglobal pixel_sa8d_8x8_internal
- lea r6, [r0+4*r1]
- lea r7, [r2+4*r3]
- LOAD_SUMSUB_8x4P 0, 1, 2, 8, 5, 6, 7, r0, r2
- LOAD_SUMSUB_8x4P 4, 5, 3, 9, 11, 6, 7, r6, r7
-%if vertical
- HADAMARD8_2D 0, 1, 2, 8, 4, 5, 3, 9, 6, amax
-%else ; non-sse2
- HADAMARD8_2D_HMUL 0, 1, 2, 8, 4, 5, 3, 9, 6, 11
-%endif
- paddw m0, m1
- paddw m0, m2
- paddw m0, m8
- SAVE_MM_PERMUTATION
- ret
-
-cglobal pixel_sa8d_8x8, 4,8,12
- FIX_STRIDES r1, r3
- lea r4, [3*r1]
- lea r5, [3*r3]
-%if vertical == 0
- mova m7, [hmul_8p]
-%endif
- call pixel_sa8d_8x8_internal
-%if HIGH_BIT_DEPTH
- HADDUW m0, m1
-%else
- HADDW m0, m1
-%endif ; HIGH_BIT_DEPTH
- movd eax, m0
- add eax, 1
- shr eax, 1
- RET
-
-cglobal pixel_sa8d_16x16, 4,8,12
- FIX_STRIDES r1, r3
- lea r4, [3*r1]
- lea r5, [3*r3]
-%if vertical == 0
- mova m7, [hmul_8p]
-%endif
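+; SA8D_16x16: one 16x16 sa8d via four pixel_sa8d_8x8_internal calls; the
+; rounded per-block sum is accumulated into m12 (used by the 64-bit 32x32)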
+%macro SA8D_16x16 0
call pixel_sa8d_8x8_internal ; pix[0]
add r2, 8*SIZEOF_PIXEL
add r0, 8*SIZEOF_PIXEL
@@ -3541,11 +3493,119 @@
%if HIGH_BIT_DEPTH == 0
HADDUW m0, m1
%endif
+ paddd m0, [pd_1]
+ psrld m0, 1
+ paddd m12, m0
+%endmacro
+
+%macro SA8D 0
+; sse2 doesn't seem to like the horizontal way of doing things
+%define vertical ((notcpuflag(ssse3) || cpuflag(atom)) || HIGH_BIT_DEPTH)
+
+%if ARCH_X86_64
+;-----------------------------------------------------------------------------
+; int pixel_sa8d_8x8( uint8_t *, intptr_t, uint8_t *, intptr_t )
+;-----------------------------------------------------------------------------
+cglobal pixel_sa8d_8x8_internal
+ lea r6, [r0+4*r1]
+ lea r7, [r2+4*r3]
+ LOAD_SUMSUB_8x4P 0, 1, 2, 8, 5, 6, 7, r0, r2
+ LOAD_SUMSUB_8x4P 4, 5, 3, 9, 11, 6, 7, r6, r7
+%if vertical
+ HADAMARD8_2D 0, 1, 2, 8, 4, 5, 3, 9, 6, amax
+%else ; non-sse2
+ HADAMARD8_2D_HMUL 0, 1, 2, 8, 4, 5, 3, 9, 6, 11
+%endif
+ paddw m0, m1
+ paddw m0, m2
+ paddw m0, m8
+ SAVE_MM_PERMUTATION
+ ret
+
+cglobal pixel_sa8d_8x8, 4,8,12
+ FIX_STRIDES r1, r3
+ lea r4, [3*r1]
+ lea r5, [3*r3]
+%if vertical == 0
+ mova m7, [hmul_8p]
+%endif
+ call pixel_sa8d_8x8_internal
+%if HIGH_BIT_DEPTH
+ HADDUW m0, m1
+%else
+ HADDW m0, m1
+%endif ; HIGH_BIT_DEPTH
+ movd eax, m0
+ add eax, 1
+ shr eax, 1
+ RET
+
+cglobal pixel_sa8d_16x16, 4,8,12
+ FIX_STRIDES r1, r3
+ lea r4, [3*r1]
+ lea r5, [3*r3]
+%if vertical == 0
+ mova m7, [hmul_8p]
+%endif
+ call pixel_sa8d_8x8_internal ; pix[0]
+ add r2, 8*SIZEOF_PIXEL
+ add r0, 8*SIZEOF_PIXEL
+%if HIGH_BIT_DEPTH
+ HADDUW m0, m1
+%endif
+ mova m10, m0
+ call pixel_sa8d_8x8_internal ; pix[8]
+ lea r2, [r2+8*r3]
+ lea r0, [r0+8*r1]
+ SA8D_INTER
+ call pixel_sa8d_8x8_internal ; pix[8*stride+8]
+ sub r2, 8*SIZEOF_PIXEL
+ sub r0, 8*SIZEOF_PIXEL
+ SA8D_INTER
+ call pixel_sa8d_8x8_internal ; pix[8*stride]
+ SA8D_INTER
+ SWAP 0, 10
+%if HIGH_BIT_DEPTH == 0
+ HADDUW m0, m1
+%endif
movd eax, m0
add eax, 1
shr eax, 1
RET
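+
+; 32x32: SA8D_16x16 on each 16x16 block, walking top-left, top-right,
+; bottom-right, bottom-left; m12 carries the running total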
+cglobal pixel_sa8d_32x32, 4,8,12
+ FIX_STRIDES r1, r3
+ lea r4, [3*r1]
+ lea r5, [3*r3]
+ pxor m12, m12 ; running total of the rounded 16x16 sums
+%if vertical == 0
+ mova m7, [hmul_8p]
+%endif
+ SA8D_16x16 ; 1st 16x16 block (top-left)
+ lea r4, [8*r1]
+ lea r5, [8*r3]
+ sub r0, r4 ; rewind 8 rows (SA8D_16x16 exits with the pointers 8 rows down,
+ sub r2, r5 ; each by its own stride)
+ add r0, 16*SIZEOF_PIXEL ; then step right to the top-right 16x16 block
+ add r2, 16*SIZEOF_PIXEL
+ lea r4, [3*r1] ; restore the 3*stride offsets the 8x8 kernel expects
+ lea r5, [3*r3]
+ SA8D_16x16 ; 2nd 16x16 block (top-right)
+ lea r0, [r0+8*r1] ; 8 more rows down: now at the bottom-right block
+ lea r2, [r2+8*r3]
+ SA8D_16x16 ; 3rd 16x16 block (bottom-right)
+ lea r4, [8*r1]
+ lea r5, [8*r3]
+ sub r0, r4 ; rewind 8 rows again, each pointer by its own stride
+ sub r2, r5
+ sub r0, 16*SIZEOF_PIXEL ; step left to the bottom-left 16x16 block
+ sub r2, 16*SIZEOF_PIXEL
+ lea r4, [3*r1]
+ lea r5, [3*r3]
+ SA8D_16x16 ; 4th 16x16 block (bottom-left)
+ movd eax, m12 ; return the accumulated total
+ RET
+
%else ; ARCH_X86_32
%if mmsize == 16
cglobal pixel_sa8d_8x8_internal
@@ -3599,6 +3659,22 @@
ret
%endif ; ifndef mmx2
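+; same body as pixel_sa8d_8x8_internal above, cloned under a second label
+; for the new large-block functions so existing callers are left untouched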
+cglobal pixel_sa8d_8x8_internal2
+ %define spill0 [esp+4]
+ LOAD_DIFF_8x4P 0, 1, 2, 3, 4, 5, 6, r0, r2, 1
+ HADAMARD4_2D 0, 1, 2, 3, 4
+ movdqa spill0, m3
+ LOAD_DIFF_8x4P 4, 5, 6, 7, 3, 3, 2, r0, r2, 1
+ HADAMARD4_2D 4, 5, 6, 7, 3
+ HADAMARD2_2D 0, 4, 1, 5, 3, qdq, amax
+ movdqa m3, spill0
+ paddw m0, m1
+ HADAMARD2_2D 2, 6, 3, 7, 5, qdq, amax
+ paddw m0, m2
+ paddw m0, m3
+ SAVE_MM_PERMUTATION
+ ret
+
cglobal pixel_sa8d_8x8, 4,7
FIX_STRIDES r1, r3
mov r6, esp
@@ -3676,6 +3752,132 @@
shr eax, 1
mov esp, r6
RET
+
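+; 32-bit variant: with only 8 XMM registers there is no spare register for
+; a running total, so 8x8 partial sums are combined through the stack
+; ([esp+48]) and each rounded 16x16 result is accumulated in dword [esp+36]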
+cglobal pixel_sa8d_32x32, 4,7,8
+ FIX_STRIDES r1, r3
+ mov r6, esp
+ and esp, ~15
+ sub esp, 64
+
+ lea r4, [r1 + 2*r1]
+ lea r5, [r3 + 2*r3]
+ call pixel_sa8d_8x8_internal2 ; 8x8 at (0,0); the internal advances r0/r2 by 8 rows
+ mova [esp+48], m0
+ call pixel_sa8d_8x8_internal2 ; 8x8 at (8,0)
+ SA8D_INTER
+ mova [esp+48], m0
+
+ mov r0, [r6+20]
+ mov r2, [r6+28]
+ add r0, 8*SIZEOF_PIXEL
+ add r2, 8*SIZEOF_PIXEL
+ call pixel_sa8d_8x8_internal2 ; 8x8 at (0,8)
+ SA8D_INTER
+ mova [esp+48], m0
+ call pixel_sa8d_8x8_internal2 ; 8x8 at (8,8)
+ paddusw m0, [esp+48]
+ HADDUW m0, m1
+ movd r4d, m0
+ add r4d, 1
+ shr r4d, 1 ; rounded sum of the top-left 16x16
+ mov dword [esp+36], r4d ; start the running total
+
+ mov r0, [r6+20] ; reload pix1/pix2 from the stack arguments
+ mov r2, [r6+28]
+ add r0, 16*SIZEOF_PIXEL ; top-right 16x16
+ add r2, 16*SIZEOF_PIXEL
+ lea r4, [r1 + 2*r1] ; r4d was used as scratch above; restore 3*r1
+ call pixel_sa8d_8x8_internal2
+ mova [esp+48], m0
+ call pixel_sa8d_8x8_internal2
+ SA8D_INTER
+ mova [esp+48], m0
+
+ mov r0, [r6+20]
+ mov r2, [r6+28]
+ add r0, 24*SIZEOF_PIXEL
+ add r2, 24*SIZEOF_PIXEL
+ call pixel_sa8d_8x8_internal2
+ SA8D_INTER
+ mova [esp+64-mmsize], m0
+ call pixel_sa8d_8x8_internal2
+ paddusw m0, [esp+48]
+ HADDUW m0, m1
+ movd r4d, m0
+ add r4d, 1
+ shr r4d, 1
+ add r4d, dword [esp+36] ; accumulate the rounded 16x16 sums
+ mov dword [esp+36], r4d
+
+ mov r0, [r6+20]
+ mov r2, [r6+28]
+ lea r0, [r0 + r1*8] ; two 8-row steps: down to the bottom-left 16x16
+ lea r2, [r2 + r3*8]
+ lea r0, [r0 + r1*8]
+ lea r2, [r2 + r3*8]
+ lea r4, [r1 + 2*r1]
+ call pixel_sa8d_8x8_internal2
+ mova [esp+48], m0
+ call pixel_sa8d_8x8_internal2
+ SA8D_INTER
+ mova [esp+48], m0
+
+ mov r0, [r6+20]
+ mov r2, [r6+28]
+ lea r0, [r0 + r1*8]
+ lea r2, [r2 + r3*8]
+ lea r0, [r0 + r1*8]
+ lea r2, [r2 + r3*8]
+ add r0, 8*SIZEOF_PIXEL
+ add r2, 8*SIZEOF_PIXEL
+ call pixel_sa8d_8x8_internal2
+ SA8D_INTER
+ mova [esp+64-mmsize], m0
+ call pixel_sa8d_8x8_internal2
+ paddusw m0, [esp+48]
+ HADDUW m0, m1
+ movd r4d, m0
+ add r4d, 1
+ shr r4d, 1
+ add r4d, dword [esp+36]
+ mov dword [esp+36], r4d
+
+ mov r0, [r6+20]
+ mov r2, [r6+28]
+ lea r0, [r0 + r1*8]
+ lea r2, [r2 + r3*8]
+ lea r0, [r0 + r1*8]
+ lea r2, [r2 + r3*8]
+ add r0, 16*SIZEOF_PIXEL ; bottom-right 16x16
+ add r2, 16*SIZEOF_PIXEL
+ lea r4, [r1 + 2*r1]
+ call pixel_sa8d_8x8_internal2
+ mova [esp+48], m0
+ call pixel_sa8d_8x8_internal2
+ SA8D_INTER
+ mova [esp+48], m0
+
+ mov r0, [r6+20]
+ mov r2, [r6+28]
+ lea r0, [r0 + r1*8]
+ lea r2, [r2 + r3*8]
+ lea r0, [r0 + r1*8]
+ lea r2, [r2 + r3*8]
+ add r0, 24*SIZEOF_PIXEL
+ add r2, 24*SIZEOF_PIXEL
+ call pixel_sa8d_8x8_internal2
+ SA8D_INTER
+ mova [esp+64-mmsize], m0
+ call pixel_sa8d_8x8_internal2
+ paddusw m0, [esp+48]
+ HADDUW m0, m1
+ movd r4d, m0
+ add r4d, 1
+ shr r4d, 1
+ add r4d, dword [esp+36]
+ mov eax, r4d ; return the 32x32 total
+ mov esp, r6
+ RET
%endif ; !ARCH_X86_64
%endmacro ; SA8D
@@ -5933,7 +6135,7 @@
%if ARCH_X86_64 == 0
cextern pixel_sa8d_8x8_internal_mmx2
INIT_MMX mmx2
-SA8D
+;SA8D ; disabled: the new 32x32 code in this macro is SSE2-only
%endif
%define TRANS TRANS_SSE2