[x265] [PATCH] asm: pixel_sse_ss_64xN assembly routine
yuvaraj at multicorewareinc.com (Yuvaraj Venkatesh)
Wed Nov 27 12:39:39 CET 2013
# HG changeset patch
# User Yuvaraj Venkatesh <yuvaraj at multicorewareinc.com>
# Date 1385552368 -19800
# Wed Nov 27 17:09:28 2013 +0530
# Branch stable
# Node ID 27ffe85a70b128a7ac6108db1d2a7d1f6dfbb63a
# Parent 1e01336ceb1ab0226e20a0d74bce8dc43dbdf869
asm: pixel_sse_ss_64xN assembly routine
diff -r 1e01336ceb1a -r 27ffe85a70b1 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp Wed Nov 27 16:32:56 2013 +0530
+++ b/source/common/x86/asm-primitives.cpp Wed Nov 27 17:09:28 2013 +0530
@@ -110,7 +110,11 @@
p.sse_ss[LUMA_32x24] = x265_pixel_ssd_ss_32x24_ ## cpu; \
p.sse_ss[LUMA_32x32] = x265_pixel_ssd_ss_32x32_ ## cpu; \
p.sse_ss[LUMA_32x64] = x265_pixel_ssd_ss_32x64_ ## cpu; \
- p.sse_ss[LUMA_48x64] = x265_pixel_ssd_ss_48x64_ ## cpu;
+ p.sse_ss[LUMA_48x64] = x265_pixel_ssd_ss_48x64_ ## cpu; \
+ p.sse_ss[LUMA_64x16] = x265_pixel_ssd_ss_64x16_ ## cpu; \
+ p.sse_ss[LUMA_64x32] = x265_pixel_ssd_ss_64x32_ ## cpu; \
+ p.sse_ss[LUMA_64x48] = x265_pixel_ssd_ss_64x48_ ## cpu; \
+ p.sse_ss[LUMA_64x64] = x265_pixel_ssd_ss_64x64_ ## cpu;
#define SA8D_INTER_FROM_BLOCK(cpu) \
p.sa8d_inter[LUMA_4x8] = x265_pixel_satd_4x8_ ## cpu; \
diff -r 1e01336ceb1a -r 27ffe85a70b1 source/common/x86/pixel-a.asm
--- a/source/common/x86/pixel-a.asm Wed Nov 27 16:32:56 2013 +0530
+++ b/source/common/x86/pixel-a.asm Wed Nov 27 17:09:28 2013 +0530
@@ -595,24 +595,132 @@
RET
%endmacro
+;-----------------------------------------------------------------------------
+; int pixel_ssd_ss_64x%1(int16_t *pix1, intptr_t stride1,
+;                        int16_t *pix2, intptr_t stride2)
+; Sum of squared differences over a 64x%1 block of int16 samples.
+; r0/r2 = row pointers, r1/r3 = strides (scaled to byte units by
+; FIX_STRIDES); m0 accumulates four dword partial sums via pmaddwd/paddd.
+; Each loop iteration processes two rows; one row is 64 int16 pixels =
+; 128 bytes, covered by eight unaligned 16-byte loads, hence r4d = %1/2.
+;-----------------------------------------------------------------------------
+%macro SSD_SS_64 1
+cglobal pixel_ssd_ss_64x%1, 4,7,6
+ FIX_STRIDES r1, r3
+ mov r4d, %1/2
+ pxor m0, m0
+.loop:
+; first row: eight 16-byte chunks of (a-b)^2 accumulated into m0
+ movu m1, [r0]
+ movu m2, [r2]
+ psubw m1, m2
+ pmaddwd m1, m1
+ paddd m0, m1
+ movu m1, [r0 + 16]
+ movu m2, [r2 + 16]
+ psubw m1, m2
+ pmaddwd m1, m1
+ paddd m0, m1
+ movu m1, [r0 + 32]
+ movu m2, [r2 + 32]
+ psubw m1, m2
+ pmaddwd m1, m1
+ paddd m0, m1
+ movu m1, [r0 + 48]
+ movu m2, [r2 + 48]
+ psubw m1, m2
+ pmaddwd m1, m1
+ paddd m0, m1
+ movu m1, [r0 + 64]
+ movu m2, [r2 + 64]
+ psubw m1, m2
+ pmaddwd m1, m1
+ paddd m0, m1
+ movu m1, [r0 + 80]
+ movu m2, [r2 + 80]
+ psubw m1, m2
+ pmaddwd m1, m1
+ paddd m0, m1
+ movu m1, [r0 + 96]
+ movu m2, [r2 + 96]
+ psubw m1, m2
+ pmaddwd m1, m1
+ paddd m0, m1
+ movu m1, [r0 + 112]
+ movu m2, [r2 + 112]
+ psubw m1, m2
+ pmaddwd m1, m1
+ paddd m0, m1
+ lea r0, [r0 + 2*r1]
+ lea r2, [r2 + 2*r3]
+; second row: identical pattern
+ movu m1, [r0]
+ movu m2, [r2]
+ psubw m1, m2
+ pmaddwd m1, m1
+ paddd m0, m1
+ movu m1, [r0 + 16]
+ movu m2, [r2 + 16]
+ psubw m1, m2
+ pmaddwd m1, m1
+ paddd m0, m1
+ movu m1, [r0 + 32]
+ movu m2, [r2 + 32]
+ psubw m1, m2
+ pmaddwd m1, m1
+ paddd m0, m1
+ movu m1, [r0 + 48]
+ movu m2, [r2 + 48]
+ psubw m1, m2
+ pmaddwd m1, m1
+ paddd m0, m1
+ movu m1, [r0 + 64]
+ movu m2, [r2 + 64]
+ psubw m1, m2
+ pmaddwd m1, m1
+ paddd m0, m1
+ movu m1, [r0 + 80]
+ movu m2, [r2 + 80]
+ psubw m1, m2
+ pmaddwd m1, m1
+ paddd m0, m1
+ movu m1, [r0 + 96]
+ movu m2, [r2 + 96]
+ psubw m1, m2
+ pmaddwd m1, m1
+ paddd m0, m1
+ movu m1, [r0 + 112]
+ movu m2, [r2 + 112]
+ psubw m1, m2
+ pmaddwd m1, m1
+ paddd m0, m1
+ lea r0, [r0 + 2*r1]
+ lea r2, [r2 + 2*r3]
+ dec r4d
+ jnz .loop
+; horizontal reduction of the four dword partial sums into eax
+ phaddd m0, m0
+ phaddd m0, m0
+ movd eax, m0
+ RET
+%endmacro
+
+; Emit the four 64-wide ssd_ss kernels (64x16/32/48/64) for the SIMD
+; flavor selected by the preceding INIT_XMM.
+%macro SSD_SS_64xN 0
+SSD_SS_64 16
+SSD_SS_64 32
+SSD_SS_64 48
+SSD_SS_64 64
+%endmacro
+
+; Expand each SSD_SS macro family once per instruction set; SSD_SS_64xN
+; adds the new 64xN kernels to every expansion.
INIT_XMM sse2
SSD_SS_ONE
SSD_SS_12x16
SSD_SS_24
SSD_SS_32xN
SSD_SS_48
+SSD_SS_64xN
INIT_XMM sse4
SSD_SS_ONE
SSD_SS_12x16
SSD_SS_24
SSD_SS_32xN
SSD_SS_48
+SSD_SS_64xN
INIT_XMM avx
SSD_SS_ONE
SSD_SS_12x16
SSD_SS_24
SSD_SS_32xN
SSD_SS_48
+SSD_SS_64xN
%endif ; !HIGH_BIT_DEPTH
%if HIGH_BIT_DEPTH == 0
More information about the x265-devel mailing list