[x265] [PATCH] asm: add assembly code for pixel_satd_32x24 and rearrange the functions

yuvaraj at multicorewareinc.com
Tue Nov 12 07:52:18 CET 2013


# HG changeset patch
# User Yuvaraj Venkatesh <yuvaraj at multicorewareinc.com>
# Date 1384238968 -19800
#      Tue Nov 12 12:19:28 2013 +0530
# Node ID 5f77c48448f52d3dfe2d1f7c1dc265df674ff6db
# Parent  1ca01c82609fbb173a665c31adf07c429806d4f1
asm: add assembly code for pixel_satd_32x24 and rearrange the functions

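For context: before this patch LUMA_32x24 had no dedicated kernel; the HEVC_SATD macro synthesized it through the generic cmp<> wrapper, tiling the 32x24 block with 16x8 SATD calls (the line removed from asm-primitives.cpp below). The new x265_pixel_satd_32x24_sse2/sse4/avx entry points replace that per-partition dispatch with a single call. A minimal sketch of what the wrapper computes, using an illustrative plain-C++ 4x4 SATD as the partition primitive; the actual cmp template in x265 may differ in detail:

    #include <cstdint>
    #include <cstdlib>

    // Illustrative 4x4 SATD: sum of absolute values of the 2-D Hadamard
    // transform of the pixel differences, halved. A stand-in for the
    // optimized primitives, not the project's reference code verbatim.
    int satd_4x4(const uint8_t* pix1, intptr_t stride1,
                 const uint8_t* pix2, intptr_t stride2)
    {
        int d[4][4], m[4][4], sum = 0;
        for (int i = 0; i < 4; i++, pix1 += stride1, pix2 += stride2)
            for (int j = 0; j < 4; j++)
                d[i][j] = pix1[j] - pix2[j];
        for (int i = 0; i < 4; i++)            // horizontal butterflies
        {
            int s01 = d[i][0] + d[i][1], d01 = d[i][0] - d[i][1];
            int s23 = d[i][2] + d[i][3], d23 = d[i][2] - d[i][3];
            m[i][0] = s01 + s23; m[i][2] = s01 - s23;
            m[i][1] = d01 + d23; m[i][3] = d01 - d23;
        }
        for (int j = 0; j < 4; j++)            // vertical butterflies
        {
            int s01 = m[0][j] + m[1][j], d01 = m[0][j] - m[1][j];
            int s23 = m[2][j] + m[3][j], d23 = m[2][j] - m[3][j];
            sum += abs(s01 + s23) + abs(s01 - s23)
                 + abs(d01 + d23) + abs(d01 - d23);
        }
        return sum >> 1;
    }

    // What cmp<W, H, pw, ph, f> computes: tile the W x H block with
    // pw x ph partitions and sum the primitive f over the tiles. The
    // removed line instantiated this as cmp<32, 24, 16, 8, satd_16x8>.
    template<int W, int H, int pw, int ph,
             int (*f)(const uint8_t*, intptr_t, const uint8_t*, intptr_t)>
    int cmp(const uint8_t* pix1, intptr_t stride1,
            const uint8_t* pix2, intptr_t stride2)
    {
        int sum = 0;
        for (int y = 0; y < H; y += ph)
            for (int x = 0; x < W; x += pw)
                sum += f(pix1 + y * stride1 + x, stride1,
                         pix2 + y * stride2 + x, stride2);
        return sum;
    }

    // Usage: int s = cmp<32, 24, 4, 4, satd_4x4>(src, 64, ref, 64);

A dedicated 32x24 kernel removes the per-partition call overhead of this wrapper for this block size.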
diff -r 1ca01c82609f -r 5f77c48448f5 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Mon Nov 11 15:46:00 2013 +0530
+++ b/source/common/x86/asm-primitives.cpp	Tue Nov 12 12:19:28 2013 +0530
@@ -69,7 +69,6 @@
 #define HEVC_SATD(cpu) \
     HEVC_X64_SATD(cpu) \
     p.satd[LUMA_32x32] = cmp<32, 32, 16, 16, x265_pixel_satd_16x16_ ## cpu>; \
-    p.satd[LUMA_32x24] = cmp<32, 24, 16, 8, x265_pixel_satd_16x8_ ## cpu>; \
     p.satd[LUMA_24x32] = cmp<24, 32, 8, 16, x265_pixel_satd_8x16_ ## cpu>; \
     p.satd[LUMA_64x64] = cmp<64, 64, 16, 16, x265_pixel_satd_16x16_ ## cpu>; \
     p.satd[LUMA_64x32] = cmp<64, 32, 16, 16, x265_pixel_satd_16x16_ ## cpu>; \
@@ -331,6 +330,7 @@
         p.satd[LUMA_12x16] = cmp<12, 16, 4, 16, x265_pixel_satd_4x16_mmx2>;
         p.satd[LUMA_32x8]  = x265_pixel_satd_32x8_sse2;
         p.satd[LUMA_32x16] = x265_pixel_satd_32x16_sse2;
+        p.satd[LUMA_32x24] = x265_pixel_satd_32x24_sse2;
         p.sa8d[BLOCK_4x4]  = x265_pixel_satd_4x4_mmx2;
         p.frame_init_lowres_core = x265_frame_init_lowres_core_mmx2;
 
@@ -481,6 +481,7 @@
         p.satd[LUMA_12x16]  = cmp<12, 16, 4, 16, x265_pixel_satd_4x16_sse4>;
         p.satd[LUMA_32x8] = x265_pixel_satd_32x8_sse4;
         p.satd[LUMA_32x16] = x265_pixel_satd_32x16_sse4;
+        p.satd[LUMA_32x24] = x265_pixel_satd_32x24_sse4;
         p.sa8d[BLOCK_8x8]   = x265_pixel_sa8d_8x8_sse4;
         p.sa8d[BLOCK_16x16] = x265_pixel_sa8d_16x16_sse4;
         SA8D_INTER_FROM_BLOCK(sse4);
@@ -499,6 +500,7 @@
         p.satd[LUMA_12x16]  = cmp<12, 16, 4, 16, x265_pixel_satd_4x16_avx>;
         p.satd[LUMA_32x8] = x265_pixel_satd_32x8_avx;
         p.satd[LUMA_32x16] = x265_pixel_satd_32x16_avx;
+        p.satd[LUMA_32x24] = x265_pixel_satd_32x24_avx;
         p.sa8d[BLOCK_8x8]   = x265_pixel_sa8d_8x8_avx;
         p.sa8d[BLOCK_16x16] = x265_pixel_sa8d_16x16_avx;
         SA8D_INTER_FROM_BLOCK(avx);
diff -r 1ca01c82609f -r 5f77c48448f5 source/common/x86/pixel-a.asm
--- a/source/common/x86/pixel-a.asm	Mon Nov 11 15:46:00 2013 +0530
+++ b/source/common/x86/pixel-a.asm	Tue Nov 12 12:19:28 2013 +0530
@@ -1780,51 +1780,30 @@
     call pixel_satd_16x4_internal
     SATD_END_SSE2 m10
 
-%else
-cglobal pixel_satd_32x16, 4,8,8
-%if WIN64                           ;if WIN64 && cpuflag(avx)
-    SATD_START_SSE2 m6, m7
+cglobal pixel_satd_32x24, 4,8,8    ;if WIN64 && notcpuflag(avx)
+    SATD_START_SSE2 m10, m7
     mov r6, r0
     mov r7, r2
-    call pixel_satd_8x8_internal
-    call pixel_satd_8x8_internal
-    lea r0, [r6 + 8]
-    lea r2, [r7 + 8]
-    call pixel_satd_8x8_internal
-    call pixel_satd_8x8_internal
+%if vertical
+    mova m7, [pw_00ff]
+%endif
+    call pixel_satd_16x4_internal
+    call pixel_satd_16x4_internal
+    call pixel_satd_16x4_internal
+    call pixel_satd_16x4_internal
+    call pixel_satd_16x4_internal
+    call pixel_satd_16x4_internal
     lea r0, [r6 + 16]
     lea r2, [r7 + 16]
-    call pixel_satd_8x8_internal
-    call pixel_satd_8x8_internal
-    lea r0, [r6 + 24]
-    lea r2, [r7 + 24]
-    call pixel_satd_8x8_internal
-    call pixel_satd_8x8_internal
-    SATD_END_SSE2 m6
-%else                               ;if !WIN64
-    SATD_START_SSE2 m6, m7
-    call pixel_satd_8x8_internal
-    call pixel_satd_8x8_internal
-    mov r0, r0mp
-    mov r2, r2mp
-    add r0, 8
-    add r2, 8
-    call pixel_satd_8x8_internal
-    call pixel_satd_8x8_internal
-    mov r0, r0mp
-    mov r2, r2mp
-    add r0, 16
-    add r2, 16
-    call pixel_satd_8x8_internal
-    call pixel_satd_8x8_internal
-    mov r0, r0mp
-    mov r2, r2mp
-    add r0, 24
-    add r2, 24
-    call pixel_satd_8x8_internal
-    call pixel_satd_8x8_internal
-    SATD_END_SSE2 m6
-%endif
+    call pixel_satd_16x4_internal
+    call pixel_satd_16x4_internal
+    call pixel_satd_16x4_internal
+    call pixel_satd_16x4_internal
+    call pixel_satd_16x4_internal
+    call pixel_satd_16x4_internal
+    SATD_END_SSE2 m10
+
+%else
 
 cglobal pixel_satd_32x8, 4,6,8    ;if !WIN64
     SATD_START_SSE2 m6, m7
@@ -1848,6 +1827,106 @@
     call pixel_satd_8x8_internal
     SATD_END_SSE2 m6
 
+%if WIN64
+cglobal pixel_satd_32x16, 4,8,8    ;if WIN64 && cpuflag(avx)
+    SATD_START_SSE2 m6, m7
+    mov r6, r0
+    mov r7, r2
+    call pixel_satd_8x8_internal
+    call pixel_satd_8x8_internal
+    lea r0, [r6 + 8]
+    lea r2, [r7 + 8]
+    call pixel_satd_8x8_internal
+    call pixel_satd_8x8_internal
+    lea r0, [r6 + 16]
+    lea r2, [r7 + 16]
+    call pixel_satd_8x8_internal
+    call pixel_satd_8x8_internal
+    lea r0, [r6 + 24]
+    lea r2, [r7 + 24]
+    call pixel_satd_8x8_internal
+    call pixel_satd_8x8_internal
+    SATD_END_SSE2 m6
+%else
+cglobal pixel_satd_32x16, 4,6,8    ;if !WIN64
+    SATD_START_SSE2 m6, m7
+    call pixel_satd_8x8_internal
+    call pixel_satd_8x8_internal
+    mov r0, r0mp
+    mov r2, r2mp
+    add r0, 8
+    add r2, 8
+    call pixel_satd_8x8_internal
+    call pixel_satd_8x8_internal
+    mov r0, r0mp
+    mov r2, r2mp
+    add r0, 16
+    add r2, 16
+    call pixel_satd_8x8_internal
+    call pixel_satd_8x8_internal
+    mov r0, r0mp
+    mov r2, r2mp
+    add r0, 24
+    add r2, 24
+    call pixel_satd_8x8_internal
+    call pixel_satd_8x8_internal
+    SATD_END_SSE2 m6
+%endif
+
+%if WIN64
+cglobal pixel_satd_32x24, 4,8,8    ;if WIN64 && cpuflag(avx)
+    SATD_START_SSE2 m6, m7
+    mov r6, r0
+    mov r7, r2
+    call pixel_satd_8x8_internal
+    call pixel_satd_8x8_internal
+    call pixel_satd_8x8_internal
+    lea r0, [r6 + 8]
+    lea r2, [r7 + 8]
+    call pixel_satd_8x8_internal
+    call pixel_satd_8x8_internal
+    call pixel_satd_8x8_internal
+    lea r0, [r6 + 16]
+    lea r2, [r7 + 16]
+    call pixel_satd_8x8_internal
+    call pixel_satd_8x8_internal
+    call pixel_satd_8x8_internal
+    lea r0, [r6 + 24]
+    lea r2, [r7 + 24]
+    call pixel_satd_8x8_internal
+    call pixel_satd_8x8_internal
+    call pixel_satd_8x8_internal
+    SATD_END_SSE2 m6
+%else
+cglobal pixel_satd_32x24, 4,6,8    ;if !WIN64
+    SATD_START_SSE2 m6, m7
+    call pixel_satd_8x8_internal
+    call pixel_satd_8x8_internal
+    call pixel_satd_8x8_internal
+    mov r0, r0mp
+    mov r2, r2mp
+    add r0, 8
+    add r2, 8
+    call pixel_satd_8x8_internal
+    call pixel_satd_8x8_internal
+    call pixel_satd_8x8_internal
+    mov r0, r0mp
+    mov r2, r2mp
+    add r0, 16
+    add r2, 16
+    call pixel_satd_8x8_internal
+    call pixel_satd_8x8_internal
+    call pixel_satd_8x8_internal
+    mov r0, r0mp
+    mov r2, r2mp
+    add r0, 24
+    add r2, 24
+    call pixel_satd_8x8_internal
+    call pixel_satd_8x8_internal
+    call pixel_satd_8x8_internal
+    SATD_END_SSE2 m6
+%endif
+
 cglobal pixel_satd_16x8, 4,6,8
     SATD_START_SSE2 m6, m7
     BACKUP_POINTERS

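A note on the structure of the new kernels, as read from the diff: the m10-based pixel_satd_32x24 (first pixel-a.asm hunk, commented "WIN64 && notcpuflag(avx)") covers the block as two 16-pixel-wide columns of six pixel_satd_16x4_internal calls each (6 x 4 = 24 rows), while the m6-based variants tile it as four 8-wide columns of three pixel_satd_8x8_internal calls each (3 x 8 = 24 rows). The back-to-back calls with no pointer adjustment between them indicate that the internal helpers advance r0/r2 down the column themselves; WIN64 keeps the column bases in r6/r7, and the non-WIN64 path reloads r0/r2 from the stack arguments (r0mp/r2mp) before stepping to the next column. A C++ rendering of that traversal order, assuming a hypothetical satd_8x8 primitive (any conforming 8x8 SATD works, e.g. cmp<8, 8, 4, 4, satd_4x4> from the sketch above):

    #include <cstdint>

    // Hypothetical stand-in for one pixel_satd_8x8_internal call:
    // SATD of the 8x8 block at (pix1, pix2).
    int satd_8x8(const uint8_t* pix1, intptr_t stride1,
                 const uint8_t* pix2, intptr_t stride2);

    // Column-order traversal used by the m6-based pixel_satd_32x24:
    // walk each 8-wide column top to bottom, then re-base to the next
    // column (the lea r0, [r6 + 8/16/24] steps in the asm).
    int satd_32x24_column_order(const uint8_t* pix1, intptr_t stride1,
                                const uint8_t* pix2, intptr_t stride2)
    {
        int sum = 0;
        for (int x = 0; x < 32; x += 8)        // four 8-wide columns
        {
            const uint8_t* p1 = pix1 + x;      // lea r0, [r6 + x]
            const uint8_t* p2 = pix2 + x;      // lea r2, [r7 + x]
            for (int i = 0; i < 3; i++)        // three 8x8 calls per column
            {
                sum += satd_8x8(p1, stride1, p2, stride2);
                p1 += 8 * stride1;             // the internal routine leaves
                p2 += 8 * stride2;             // r0/r2 advanced down the column
            }
        }
        return sum;
    }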
