[x265] [PATCH] asm: pixel_sse_ss_48x64 assembly routine

yuvaraj at multicorewareinc.com
Wed Nov 27 12:03:11 CET 2013


# HG changeset patch
# User Yuvaraj Venkatesh <yuvaraj at multicorewareinc.com>
# Date 1385550176 -19800
#      Wed Nov 27 16:32:56 2013 +0530
# Branch stable
# Node ID 1e01336ceb1ab0226e20a0d74bce8dc43dbdf869
# Parent  b1a69974195dc15330abf3c54e5fa7686ddba811
asm: pixel_sse_ss_48x64 assembly routine

diff -r b1a69974195d -r 1e01336ceb1a source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Wed Nov 27 15:55:55 2013 +0530
+++ b/source/common/x86/asm-primitives.cpp	Wed Nov 27 16:32:56 2013 +0530
@@ -109,7 +109,8 @@
     p.sse_ss[LUMA_32x16]   = x265_pixel_ssd_ss_32x16_ ## cpu; \
     p.sse_ss[LUMA_32x24]   = x265_pixel_ssd_ss_32x24_ ## cpu; \
     p.sse_ss[LUMA_32x32]   = x265_pixel_ssd_ss_32x32_ ## cpu; \
-    p.sse_ss[LUMA_32x64]   = x265_pixel_ssd_ss_32x64_ ## cpu;
+    p.sse_ss[LUMA_32x64]   = x265_pixel_ssd_ss_32x64_ ## cpu; \
+    p.sse_ss[LUMA_48x64]   = x265_pixel_ssd_ss_48x64_ ## cpu;
 
 #define SA8D_INTER_FROM_BLOCK(cpu) \
     p.sa8d_inter[LUMA_4x8]  = x265_pixel_satd_4x8_ ## cpu; \
diff -r b1a69974195d -r 1e01336ceb1a source/common/x86/pixel-a.asm
--- a/source/common/x86/pixel-a.asm	Wed Nov 27 15:55:55 2013 +0530
+++ b/source/common/x86/pixel-a.asm	Wed Nov 27 16:32:56 2013 +0530
@@ -517,22 +517,102 @@
     RET
 %endmacro
 
+%macro SSD_SS_48 0
+cglobal pixel_ssd_ss_48x64, 4,7,6
+    FIX_STRIDES r1, r3
+    mov    r4d, 32
+    pxor    m0, m0
+.loop
+    movu    m1, [r0]
+    movu    m2, [r2]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 16]
+    movu    m2, [r2 + 16]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 32]
+    movu    m2, [r2 + 32]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 48]
+    movu    m2, [r2 + 48]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 64]
+    movu    m2, [r2 + 64]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 80]
+    movu    m2, [r2 + 80]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    lea       r0, [r0 + 2*r1]
+    lea       r2, [r2 + 2*r3]
+    movu    m1, [r0]
+    movu    m2, [r2]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 16]
+    movu    m2, [r2 + 16]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 32]
+    movu    m2, [r2 + 32]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 48]
+    movu    m2, [r2 + 48]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 64]
+    movu    m2, [r2 + 64]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    movu    m1, [r0 + 80]
+    movu    m2, [r2 + 80]
+    psubw   m1, m2
+    pmaddwd m1, m1
+    paddd   m0, m1
+    lea       r0, [r0 + 2*r1]
+    lea       r2, [r2 + 2*r3]
+    dec      r4d
+    jnz .loop
+    phaddd    m0, m0
+    phaddd    m0, m0
+    movd     eax, m0
+    RET
+%endmacro
 
 INIT_XMM sse2
 SSD_SS_ONE
 SSD_SS_12x16
 SSD_SS_24
 SSD_SS_32xN
+SSD_SS_48
 INIT_XMM sse4
 SSD_SS_ONE
 SSD_SS_12x16
 SSD_SS_24
 SSD_SS_32xN
+SSD_SS_48
 INIT_XMM avx
 SSD_SS_ONE
 SSD_SS_12x16
 SSD_SS_24
 SSD_SS_32xN
+SSD_SS_48
 %endif ; !HIGH_BIT_DEPTH
 
 %if HIGH_BIT_DEPTH == 0
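
For reviewers, the loop above handles two rows per iteration (r4d counts 32 iterations for the 64 rows), with six 16-byte unaligned loads covering the 48 int16_t samples of each row. Below is a minimal scalar sketch of the same computation; the function name and the element-stride convention are illustrative assumptions rather than the exact x265 primitive signature.

    #include <stdint.h>

    /* Scalar reference: sum of squared differences over a 48x64 block of
     * int16_t residual samples.  Strides are taken in int16_t elements here;
     * identifier names are illustrative only. */
    static int ssd_ss_48x64_ref(const int16_t *pix1, intptr_t stride1,
                                const int16_t *pix2, intptr_t stride2)
    {
        int sum = 0;
        for (int y = 0; y < 64; y++)
        {
            for (int x = 0; x < 48; x++)
            {
                int d = pix1[x] - pix2[x];   /* psubw */
                sum += d * d;                /* pmaddwd + paddd accumulation */
            }
            pix1 += stride1;
            pix2 += stride2;
        }
        return sum;                          /* phaddd reduction + movd eax */
    }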

