[x265] [PATCH] assembly code for pixel_sad_x3_32xN

yuvaraj at multicorewareinc.com yuvaraj at multicorewareinc.com
Wed Oct 30 14:13:16 CET 2013


# HG changeset patch
# User Yuvaraj Venkatesh <yuvaraj at multicorewareinc.com>
# Date 1383138700 -19800
#      Wed Oct 30 18:41:40 2013 +0530
# Node ID baf2012ba47fa100a3c739ca68a6207fa22eb931
# Parent  df55905965e7679c9dca6371f60253ec90f046fb
assembly code for pixel_sad_x3_32xN

diff -r df55905965e7 -r baf2012ba47f source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Wed Oct 30 15:48:55 2013 +0530
+++ b/source/common/x86/asm-primitives.cpp	Wed Oct 30 18:41:40 2013 +0530
@@ -294,6 +294,11 @@
         p.sad_x4[LUMA_16x64] = x265_pixel_sad_x4_16x64_ssse3;
         p.sad_x3[LUMA_24x32] = x265_pixel_sad_x3_24x32_ssse3;
         p.sad_x4[LUMA_24x32] = x265_pixel_sad_x4_24x32_ssse3;
+        p.sad_x3[LUMA_32x8] = x265_pixel_sad_x3_32x8_ssse3;
+        p.sad_x3[LUMA_32x16] = x265_pixel_sad_x3_32x16_ssse3;
+        p.sad_x3[LUMA_32x24] = x265_pixel_sad_x3_32x24_ssse3;
+        p.sad_x3[LUMA_32x32] = x265_pixel_sad_x3_32x32_ssse3;
+        p.sad_x3[LUMA_32x64] = x265_pixel_sad_x3_32x64_ssse3;
 
         p.luma_hvpp[LUMA_8x8] = x265_interp_8tap_hv_pp_8x8_ssse3;
         p.ipfilter_sp[FILTER_V_S_P_8] = x265_interp_8tap_v_sp_ssse3;
@@ -329,6 +334,11 @@
         p.sad_x4[LUMA_16x64] = x265_pixel_sad_x4_16x64_avx;
         p.sad_x3[LUMA_24x32] = x265_pixel_sad_x3_24x32_avx;
         p.sad_x4[LUMA_24x32] = x265_pixel_sad_x4_24x32_avx;
+        p.sad_x3[LUMA_32x8] = x265_pixel_sad_x3_32x8_avx;
+        p.sad_x3[LUMA_32x16] = x265_pixel_sad_x3_32x16_avx;
+        p.sad_x3[LUMA_32x24] = x265_pixel_sad_x3_32x24_avx;
+        p.sad_x3[LUMA_32x32] = x265_pixel_sad_x3_32x32_avx;
+        p.sad_x3[LUMA_32x64] = x265_pixel_sad_x3_32x64_avx;
     }
     if (cpuMask & X265_CPU_XOP)
     {
diff -r df55905965e7 -r baf2012ba47f source/common/x86/sad-a.asm
--- a/source/common/x86/sad-a.asm	Wed Oct 30 15:48:55 2013 +0530
+++ b/source/common/x86/sad-a.asm	Wed Oct 30 18:41:40 2013 +0530
@@ -2241,6 +2241,106 @@
     lea     r3,  [r3 + r5]
     lea     r4,  [r4 + r5]
 %endmacro
+
+%macro SAD_X3_32x4 0               ; SAD of 4 rows x 32 cols of fenc (r0) vs 3 refs (r1/r2/r3); sums into m0/m1/m2
+    mova    m3,  [r0]              ; fenc row, left 16 bytes (fenc buffer is aligned)
+    mova    m4,  [r0 + 16]         ; fenc row, right 16 bytes
+    movu    m5,  [r1]              ; ref0 may be unaligned -> movu
+    movu    m6,  [r1 + 16]
+    psadbw  m5,  m3                ; per-8-byte absolute-difference sums vs fenc
+    psadbw  m6,  m4
+    paddd   m5,  m6                ; combine left+right halves of the row
+    paddd   m0,  m5                ; m0 accumulates SAD for ref0
+    movu    m5,  [r2]              ; same row against ref1
+    movu    m6,  [r2 + 16]
+    psadbw  m5,  m3
+    psadbw  m6,  m4
+    paddd   m5,  m6
+    paddd   m1,  m5                ; m1 accumulates SAD for ref1
+    movu    m5,  [r3]              ; same row against ref2
+    movu    m6,  [r3 + 16]
+    psadbw  m5,  m3
+    psadbw  m6,  m4
+    paddd   m5,  m6
+    paddd   m2,  m5                ; m2 accumulates SAD for ref2
+    lea     r0,  [r0 + FENC_STRIDE] ; advance fenc by its fixed stride
+    lea     r1,  [r1 + r4]         ; advance the three refs by r4 (i_stride)
+    lea     r2,  [r2 + r4]
+    lea     r3,  [r3 + r4]
+    mova    m3,  [r0]              ; row 2 of 4 (same pattern as row 1)
+    mova    m4,  [r0 + 16]
+    movu    m5,  [r1]
+    movu    m6,  [r1 + 16]
+    psadbw  m5,  m3
+    psadbw  m6,  m4
+    paddd   m5,  m6
+    paddd   m0,  m5
+    movu    m5,  [r2]
+    movu    m6,  [r2 + 16]
+    psadbw  m5,  m3
+    psadbw  m6,  m4
+    paddd   m5,  m6
+    paddd   m1,  m5
+    movu    m5,  [r3]
+    movu    m6,  [r3 + 16]
+    psadbw  m5,  m3
+    psadbw  m6,  m4
+    paddd   m5,  m6
+    paddd   m2,  m5
+    lea     r0,  [r0 + FENC_STRIDE]
+    lea     r1,  [r1 + r4]
+    lea     r2,  [r2 + r4]
+    lea     r3,  [r3 + r4]
+    mova    m3,  [r0]              ; row 3 of 4
+    mova    m4,  [r0 + 16]
+    movu    m5,  [r1]
+    movu    m6,  [r1 + 16]
+    psadbw  m5,  m3
+    psadbw  m6,  m4
+    paddd   m5,  m6
+    paddd   m0,  m5
+    movu    m5,  [r2]
+    movu    m6,  [r2 + 16]
+    psadbw  m5,  m3
+    psadbw  m6,  m4
+    paddd   m5,  m6
+    paddd   m1,  m5
+    movu    m5,  [r3]
+    movu    m6,  [r3 + 16]
+    psadbw  m5,  m3
+    psadbw  m6,  m4
+    paddd   m5,  m6
+    paddd   m2,  m5
+    lea     r0,  [r0 + FENC_STRIDE]
+    lea     r1,  [r1 + r4]
+    lea     r2,  [r2 + r4]
+    lea     r3,  [r3 + r4]
+    mova    m3,  [r0]              ; row 4 of 4
+    mova    m4,  [r0 + 16]
+    movu    m5,  [r1]
+    movu    m6,  [r1 + 16]
+    psadbw  m5,  m3
+    psadbw  m6,  m4
+    paddd   m5,  m6
+    paddd   m0,  m5
+    movu    m5,  [r2]
+    movu    m6,  [r2 + 16]
+    psadbw  m5,  m3
+    psadbw  m6,  m4
+    paddd   m5,  m6
+    paddd   m1,  m5
+    movu    m5,  [r3]
+    movu    m6,  [r3 + 16]
+    psadbw  m5,  m3
+    psadbw  m6,  m4
+    paddd   m5,  m6
+    paddd   m2,  m5
+    lea     r0,  [r0 + FENC_STRIDE] ; pointers left advanced past the 4th row for the next call
+    lea     r1,  [r1 + r4]
+    lea     r2,  [r2 + r4]
+    lea     r3,  [r3 + r4]
+%endmacro
+
 ;-----------------------------------------------------------------------------
 ; void pixel_sad_x3_16x16( uint8_t *fenc, uint8_t *pix0, uint8_t *pix1,
 ;                          uint8_t *pix2, intptr_t i_stride, int scores[3] )
@@ -2298,6 +2398,75 @@
     SAD_X4_END_SSE2 1
 %endmacro
 
+%macro SAD_X3_W32 0                ; emit pixel_sad_x3_32xN (N = 8/16/24/32/64) for the current INIT_XMM flavor
+cglobal pixel_sad_x3_32x8, 5, 6, 8 ; (fenc, pix0, pix1, pix2, i_stride, scores[3])
+    pxor  m0, m0                   ; m0/m1/m2 = SAD accumulators for the three refs
+    pxor  m1, m1
+    pxor  m2, m2
+
+    SAD_X3_32x4                    ; 2 x 4 rows = 8
+    SAD_X3_32x4
+    SAD_X3_END_SSE2 1              ; horizontal-add and store the three scores
+
+cglobal pixel_sad_x3_32x16, 5, 6, 8
+    pxor  m0, m0
+    pxor  m1, m1
+    pxor  m2, m2
+
+    SAD_X3_32x4                    ; 4 x 4 rows = 16
+    SAD_X3_32x4
+    SAD_X3_32x4
+    SAD_X3_32x4
+    SAD_X3_END_SSE2 1
+
+cglobal pixel_sad_x3_32x24, 5, 6, 8
+    pxor  m0, m0
+    pxor  m1, m1
+    pxor  m2, m2
+
+    SAD_X3_32x4                    ; 6 x 4 rows = 24
+    SAD_X3_32x4
+    SAD_X3_32x4
+    SAD_X3_32x4
+    SAD_X3_32x4
+    SAD_X3_32x4
+    SAD_X3_END_SSE2 1
+
+cglobal pixel_sad_x3_32x32, 5, 7, 8 ; needs r6 as the row counter
+    pxor  m0, m0
+    pxor  m1, m1
+    pxor  m2, m2
+    mov   r6, 32                   ; rows remaining
+
+.loop:                             ; ':' keeps NASM from flagging an orphan label
+    SAD_X3_32x4                    ; 16 rows per iteration
+    SAD_X3_32x4
+    SAD_X3_32x4
+    SAD_X3_32x4
+
+    sub   r6,  16
+    ; sub already set ZF, so no separate 'cmp r6, 0' is needed
+    jnz   .loop
+    SAD_X3_END_SSE2 1
+
+cglobal pixel_sad_x3_32x64, 5, 7, 8
+    pxor  m0, m0
+    pxor  m1, m1
+    pxor  m2, m2
+    mov   r6, 64                   ; rows remaining
+
+.loop1:
+    SAD_X3_32x4                    ; 16 rows per iteration
+    SAD_X3_32x4
+    SAD_X3_32x4
+    SAD_X3_32x4
+
+    sub   r6,  16
+    ; sub already set ZF, so no separate 'cmp r6, 0' is needed
+    jnz   .loop1
+    SAD_X3_END_SSE2 1
+%endmacro
+
 INIT_XMM sse2
 SAD_X_SSE2 3, 16, 16, 7
 SAD_X_SSE2 3, 16,  8, 7
@@ -2329,6 +2498,7 @@
 %endmacro
 
 INIT_XMM ssse3
+SAD_X3_W32
 SAD_X3_W24
 SAD_X_SSE2  3, 16, 64, 7
 SAD_X_SSE2  3, 16, 32, 7
@@ -2349,6 +2519,7 @@
 SAD_X_SSSE3 4,  8,  4
 
 INIT_XMM avx
+SAD_X3_W32
 SAD_X3_W24
 SAD_X_SSE2 3, 16, 64, 7
 SAD_X_SSE2 3, 16, 32, 6


More information about the x265-devel mailing list