[x265] [PATCH] asm: assembly code for pixel_sa8d_32x24
yuvaraj at multicorewareinc.com
Thu Nov 21 13:24:38 CET 2013
# HG changeset patch
# User Yuvaraj Venkatesh <yuvaraj at multicorewareinc.com>
# Date 1385036660 -19800
# Thu Nov 21 17:54:20 2013 +0530
# Node ID 3a9d5ded76c5629b35937cc0e35f8b8298a4ea1e
# Parent c108b3e1cb1bdf0aea843e3b06549261664fe911
asm: assembly code for pixel_sa8d_32x24
diff -r c108b3e1cb1b -r 3a9d5ded76c5 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp Thu Nov 21 16:44:30 2013 +0530
+++ b/source/common/x86/asm-primitives.cpp Thu Nov 21 17:54:20 2013 +0530
@@ -92,7 +92,7 @@
p.sa8d[BLOCK_64x64] = x265_pixel_sa8d_64x64_ ## cpu; \
p.sa8d_inter[LUMA_16x8] = x265_pixel_sa8d_16x8_ ## cpu; \
p.sa8d_inter[LUMA_8x16] = x265_pixel_sa8d_8x16_ ## cpu; \
- p.sa8d_inter[LUMA_32x24] = cmp<32, 24, 8, 8, x265_pixel_sa8d_8x8_ ## cpu>; \
+ p.sa8d_inter[LUMA_32x24] = x265_pixel_sa8d_32x24_ ## cpu; \
p.sa8d_inter[LUMA_24x32] = cmp<24, 32, 8, 8, x265_pixel_sa8d_8x8_ ## cpu>; \
p.sa8d_inter[LUMA_32x8] = cmp<32, 8, 8, 8, x265_pixel_sa8d_8x8_ ## cpu>; \
p.sa8d_inter[LUMA_8x32] = cmp<8, 32, 8, 8, x265_pixel_sa8d_8x8_ ## cpu>; \
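For context, the cmp<32, 24, 8, 8, ...> entry this hunk retires is the
generic tiling wrapper in asm-primitives.cpp. A minimal sketch of that
wrapper (simplified; the real signature in asm-primitives.cpp may differ
slightly) is:

    // Sketch of the generic wrapper being replaced: it tiles the
    // partition and sums one 8x8 comparison per tile.
    #include <cstdint>
    typedef uint8_t pixel;
    typedef int (*pixelcmp_t)(const pixel*, intptr_t, const pixel*, intptr_t);

    template<int lx, int ly, int dx, int dy, pixelcmp_t compare>
    int cmp(const pixel* pix1, intptr_t stride1,
            const pixel* pix2, intptr_t stride2)
    {
        int sum = 0;
        for (int y = 0; y < ly; y += dy)       // rows of dx x dy tiles
            for (int x = 0; x < lx; x += dx)   // columns of tiles
                sum += compare(pix1 + y * stride1 + x, stride1,
                               pix2 + y * stride2 + x, stride2);
        return sum;
    }

For LUMA_32x24 this means twelve separate 8x8 calls, each paying call and
setup overhead; the dedicated routines below walk the same twelve blocks
while keeping the accumulator live across all of them.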
diff -r c108b3e1cb1b -r 3a9d5ded76c5 source/common/x86/pixel-a.asm
--- a/source/common/x86/pixel-a.asm Thu Nov 21 16:44:30 2013 +0530
+++ b/source/common/x86/pixel-a.asm Thu Nov 21 17:54:20 2013 +0530
@@ -3682,6 +3682,51 @@
movd eax, m12
RET
+cglobal pixel_sa8d_32x24, 4,8,12
+ FIX_STRIDES r1, r3
+ lea r4, [3*r1]
+ lea r5, [3*r3]
+ pxor m12, m12
+%if vertical == 0
+ mova m7, [hmul_8p]
+%endif
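+    ; 32x24 is a 4x3 grid of 8x8 blocks. Walk the grid in a serpentine
+    ; order (right across row 0, down, left across row 1, down, right
+    ; across row 2) so each step needs only one pointer adjustment;
+    ; SA8D_8x8 adds each block's partial result into the m12 accumulator.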
+    SA8D_8x8
+    add  r0, 8*SIZEOF_PIXEL
+    add  r2, 8*SIZEOF_PIXEL
+    SA8D_8x8
+    add  r0, 8*SIZEOF_PIXEL
+    add  r2, 8*SIZEOF_PIXEL
+    SA8D_8x8
+    add  r0, 8*SIZEOF_PIXEL
+    add  r2, 8*SIZEOF_PIXEL
+    SA8D_8x8
+    lea  r0, [r0 + r1*8]
+    lea  r2, [r2 + r3*8]
+    SA8D_8x8
+    sub  r0, 8*SIZEOF_PIXEL
+    sub  r2, 8*SIZEOF_PIXEL
+    SA8D_8x8
+    sub  r0, 8*SIZEOF_PIXEL
+    sub  r2, 8*SIZEOF_PIXEL
+    SA8D_8x8
+    sub  r0, 8*SIZEOF_PIXEL
+    sub  r2, 8*SIZEOF_PIXEL
+    SA8D_8x8
+    lea  r0, [r0 + r1*8]
+    lea  r2, [r2 + r3*8]
+    SA8D_8x8
+    add  r0, 8*SIZEOF_PIXEL
+    add  r2, 8*SIZEOF_PIXEL
+    SA8D_8x8
+    add  r0, 8*SIZEOF_PIXEL
+    add  r2, 8*SIZEOF_PIXEL
+    SA8D_8x8
+    add  r0, 8*SIZEOF_PIXEL
+    add  r2, 8*SIZEOF_PIXEL
+    SA8D_8x8
+ movd eax, m12
+ RET
+
cglobal pixel_sa8d_32x32, 4,8,12
FIX_STRIDES r1, r3
lea r4, [3*r1]
@@ -4645,6 +4690,170 @@
mov esp, r6
RET
+cglobal pixel_sa8d_32x24, 4,7,8
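+    ; Same 4x3 tiling of 8x8 blocks as the x86_64 version above. x86_32
+    ; is register-starved, so the pixel pointers are reloaded from the
+    ; stacked arguments ([r6+20] = r0, [r6+28] = r2) before each block,
+    ; and the running total lives in the stack slot at [esp+36].
+    ; pixel_sa8d_8x8_internal2 apparently leaves twice the 8x8 sa8d in
+    ; m0, hence the add/shr pair rounding each partial with (x+1)>>1.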
+ FIX_STRIDES r1, r3
+ mov r6, esp
+ and esp, ~15
+ sub esp, 64
+
+ lea r4, [r1 + 2*r1]
+ lea r5, [r3 + 2*r3]
+ call pixel_sa8d_8x8_internal2
+ HADDUW m0, m1
+ movd r4d, m0
+ add r4d, 1
+ shr r4d, 1
+ mov dword [esp+36], r4d
+
+ mov r0, [r6+20]
+ mov r2, [r6+28]
+ add r0, 8*SIZEOF_PIXEL
+ add r2, 8*SIZEOF_PIXEL
+ lea r4, [r1 + 2*r1]
+ call pixel_sa8d_8x8_internal2
+ HADDUW m0, m1
+ movd r4d, m0
+ add r4d, 1
+ shr r4d, 1
+ add r4d, dword [esp+36]
+ mov dword [esp+36], r4d
+
+ mov r0, [r6+20]
+ mov r2, [r6+28]
+ add r0, 16*SIZEOF_PIXEL
+ add r2, 16*SIZEOF_PIXEL
+ lea r4, [r1 + 2*r1]
+ call pixel_sa8d_8x8_internal2
+ HADDUW m0, m1
+ movd r4d, m0
+ add r4d, 1
+ shr r4d, 1
+ add r4d, dword [esp+36]
+ mov dword [esp+36], r4d
+
+ mov r0, [r6+20]
+ mov r2, [r6+28]
+ add r0, 24*SIZEOF_PIXEL
+ add r2, 24*SIZEOF_PIXEL
+ lea r4, [r1 + 2*r1]
+ call pixel_sa8d_8x8_internal2
+ HADDUW m0, m1
+ movd r4d, m0
+ add r4d, 1
+ shr r4d, 1
+ add r4d, dword [esp+36]
+ mov dword [esp+36], r4d
+
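+    ; advance the stacked base pointers down 8 rows (start of block row 1)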
+ mov r0, [r6+20]
+ mov r2, [r6+28]
+ lea r0, [r0 + r1*8]
+ lea r2, [r2 + r3*8]
+ mov [r6+20], r0
+ mov [r6+28], r2
+ lea r4, [r1 + 2*r1]
+ call pixel_sa8d_8x8_internal2
+ HADDUW m0, m1
+ movd r4d, m0
+ add r4d, 1
+ shr r4d, 1
+ add r4d, dword [esp+36]
+ mov dword [esp+36], r4d
+
+ mov r0, [r6+20]
+ mov r2, [r6+28]
+ add r0, 8*SIZEOF_PIXEL
+ add r2, 8*SIZEOF_PIXEL
+ lea r4, [r1 + 2*r1]
+ call pixel_sa8d_8x8_internal2
+ HADDUW m0, m1
+ movd r4d, m0
+ add r4d, 1
+ shr r4d, 1
+ add r4d, dword [esp+36]
+ mov dword [esp+36], r4d
+
+ mov r0, [r6+20]
+ mov r2, [r6+28]
+ add r0, 16*SIZEOF_PIXEL
+ add r2, 16*SIZEOF_PIXEL
+ lea r4, [r1 + 2*r1]
+ call pixel_sa8d_8x8_internal2
+ HADDUW m0, m1
+ movd r4d, m0
+ add r4d, 1
+ shr r4d, 1
+ add r4d, dword [esp+36]
+ mov dword [esp+36], r4d
+
+ mov r0, [r6+20]
+ mov r2, [r6+28]
+ add r0, 24*SIZEOF_PIXEL
+ add r2, 24*SIZEOF_PIXEL
+ lea r4, [r1 + 2*r1]
+ call pixel_sa8d_8x8_internal2
+ HADDUW m0, m1
+ movd r4d, m0
+ add r4d, 1
+ shr r4d, 1
+ add r4d, dword [esp+36]
+ mov dword [esp+36], r4d
+
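+    ; advance the stacked base pointers down 8 rows (start of block row 2)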
+ mov r0, [r6+20]
+ mov r2, [r6+28]
+ lea r0, [r0 + r1*8]
+ lea r2, [r2 + r3*8]
+ mov [r6+20], r0
+ mov [r6+28], r2
+ lea r4, [r1 + 2*r1]
+ call pixel_sa8d_8x8_internal2
+ HADDUW m0, m1
+ movd r4d, m0
+ add r4d, 1
+ shr r4d, 1
+ add r4d, dword [esp+36]
+ mov dword [esp+36], r4d
+
+ mov r0, [r6+20]
+ mov r2, [r6+28]
+ add r0, 8*SIZEOF_PIXEL
+ add r2, 8*SIZEOF_PIXEL
+ lea r4, [r1 + 2*r1]
+ call pixel_sa8d_8x8_internal2
+ HADDUW m0, m1
+ movd r4d, m0
+ add r4d, 1
+ shr r4d, 1
+ add r4d, dword [esp+36]
+ mov dword [esp+36], r4d
+
+ mov r0, [r6+20]
+ mov r2, [r6+28]
+ add r0, 16*SIZEOF_PIXEL
+ add r2, 16*SIZEOF_PIXEL
+ lea r4, [r1 + 2*r1]
+ call pixel_sa8d_8x8_internal2
+ HADDUW m0, m1
+ movd r4d, m0
+ add r4d, 1
+ shr r4d, 1
+ add r4d, dword [esp+36]
+ mov dword [esp+36], r4d
+
+ mov r0, [r6+20]
+ mov r2, [r6+28]
+ add r0, 24*SIZEOF_PIXEL
+ add r2, 24*SIZEOF_PIXEL
+ lea r4, [r1 + 2*r1]
+ call pixel_sa8d_8x8_internal2
+ HADDUW m0, m1
+ movd r4d, m0
+ add r4d, 1
+ shr r4d, 1
+ add r4d, dword [esp+36]
+ mov eax, r4d
+ mov esp, r6
+ RET
+
cglobal pixel_sa8d_32x32, 4,7,8
FIX_STRIDES r1, r3
mov r6, esp
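For readers tracing the arithmetic in the x86_32 version: the add/shr
pair after every HADDUW suggests pixel_sa8d_8x8_internal2 returns twice
the final 8x8 sa8d, so each partial is rounded with (x + 1) >> 1 before
being accumulated. A minimal scalar model of the whole routine, assuming
a hypothetical sa8d8x8_times2() helper with that convention, would be:

    // Scalar model of pixel_sa8d_32x24; illustrative only.
    // sa8d8x8_times2() is a hypothetical stand-in for
    // pixel_sa8d_8x8_internal2, assumed to return 2x the 8x8 sa8d.
    #include <cstdint>
    typedef uint8_t pixel;
    int sa8d8x8_times2(const pixel* pix1, intptr_t stride1,
                       const pixel* pix2, intptr_t stride2);

    int sa8d_32x24_model(const pixel* pix1, intptr_t stride1,
                         const pixel* pix2, intptr_t stride2)
    {
        int sum = 0;                        // [esp+36] in the asm
        for (int y = 0; y < 24; y += 8)     // three rows of 8x8 blocks
            for (int x = 0; x < 32; x += 8) // four columns of blocks
                sum += (sa8d8x8_times2(pix1 + y * stride1 + x, stride1,
                                       pix2 + y * stride2 + x, stride2) + 1) >> 1;
        return sum;
    }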