[x265] [PATCH] assembly code for pixel_sad_x4_24x32
yuvaraj at multicorewareinc.com
Wed Oct 30 11:20:38 CET 2013
# HG changeset patch
# User Yuvaraj Venkatesh <yuvaraj at multicorewareinc.com>
# Date 1383128335 -19800
# Wed Oct 30 15:48:55 2013 +0530
# Node ID df55905965e7679c9dca6371f60253ec90f046fb
# Parent eca1142d1cec9303afad71108494f9076586ce05
assembly code for pixel_sad_x4_24x32
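
For reviewers, a minimal C sketch of what this primitive computes, mirroring
the generic sad_x4 primitives in pixel.cpp. The function name and the local
FENC_STRIDE define are illustrative only, not code added by this patch:

    #include <stdint.h>
    #include <stdlib.h>

    #define FENC_STRIDE 64  /* x265 keeps fenc blocks at a fixed stride of 64;
                               redefined here only to keep the sketch
                               self-contained */

    /* One 24x32 fenc block is compared against four candidate blocks and
     * the four SAD totals are written to res[0..3]. */
    static void sad_x4_24x32_c(const uint8_t *fenc,
                               const uint8_t *ref0, const uint8_t *ref1,
                               const uint8_t *ref2, const uint8_t *ref3,
                               intptr_t frefstride, int32_t *res)
    {
        res[0] = res[1] = res[2] = res[3] = 0;
        for (int y = 0; y < 32; y++)
        {
            for (int x = 0; x < 24; x++)
            {
                res[0] += abs(fenc[x] - ref0[x]);
                res[1] += abs(fenc[x] - ref1[x]);
                res[2] += abs(fenc[x] - ref2[x]);
                res[3] += abs(fenc[x] - ref3[x]);
            }
            fenc += FENC_STRIDE; /* fenc rows always use the fixed stride */
            ref0 += frefstride;  /* the four references share one stride */
            ref1 += frefstride;
            ref2 += frefstride;
            ref3 += frefstride;
        }
    }

The assembly below accumulates the same sums 16 bytes at a time with psadbw,
handling the odd 24-byte width with a second psadbw whose invalid high half
is discarded (see the macro comments).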
diff -r eca1142d1cec -r df55905965e7 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp Wed Oct 30 14:37:25 2013 +0530
+++ b/source/common/x86/asm-primitives.cpp Wed Oct 30 15:48:55 2013 +0530
@@ -293,6 +293,7 @@
p.sad_x3[LUMA_16x64] = x265_pixel_sad_x3_16x64_ssse3;
p.sad_x4[LUMA_16x64] = x265_pixel_sad_x4_16x64_ssse3;
p.sad_x3[LUMA_24x32] = x265_pixel_sad_x3_24x32_ssse3;
+ p.sad_x4[LUMA_24x32] = x265_pixel_sad_x4_24x32_ssse3;
p.luma_hvpp[LUMA_8x8] = x265_interp_8tap_hv_pp_8x8_ssse3;
p.ipfilter_sp[FILTER_V_S_P_8] = x265_interp_8tap_v_sp_ssse3;
@@ -327,6 +328,7 @@
p.sad_x3[LUMA_16x64] = x265_pixel_sad_x3_16x64_avx;
p.sad_x4[LUMA_16x64] = x265_pixel_sad_x4_16x64_avx;
p.sad_x3[LUMA_24x32] = x265_pixel_sad_x3_24x32_avx;
+ p.sad_x4[LUMA_24x32] = x265_pixel_sad_x4_24x32_avx;
}
if (cpuMask & X265_CPU_XOP)
{
diff -r eca1142d1cec -r df55905965e7 source/common/x86/sad-a.asm
--- a/source/common/x86/sad-a.asm Wed Oct 30 14:37:25 2013 +0530
+++ b/source/common/x86/sad-a.asm Wed Oct 30 15:48:55 2013 +0530
@@ -2099,6 +2099,148 @@
lea r3, [r3 + r4]
%endmacro
+%macro SAD_X4_24x4 0
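+; Compare four rows of the 24-wide fenc block against the four candidate
+; blocks. m4/m5 hold one fenc row (bytes 0-15 and 16-31; only bytes 16-23
+; of the second load belong to the block). psadbw on the m5 half leaves
+; the sum of the 8 valid bytes in the low qword and garbage in the high
+; qword; pshufd with 84 (01010100b) rebuilds the register as {d0, d1, d1,
+; d1}, and since psadbw left dword 1 zero, only the valid low sum survives,
+; so the out-of-block bytes never reach the accumulators m0-m3.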
+ mova m4, [r0]
+ mova m5, [r0 + 16]
+ movu m6, [r1]
+ movu m7, [r1 + 16]
+ psadbw m6, m4
+ psadbw m7, m5
+ pshufd m7, m7, 84
+ paddd m6, m7
+ paddd m0, m6
+ movu m6, [r2]
+ movu m7, [r2 + 16]
+ psadbw m6, m4
+ psadbw m7, m5
+ pshufd m7, m7, 84
+ paddd m6, m7
+ paddd m1, m6
+ movu m6, [r3]
+ movu m7, [r3 + 16]
+ psadbw m6, m4
+ psadbw m7, m5
+ pshufd m7, m7, 84
+ paddd m6, m7
+ paddd m2, m6
+ movu m6, [r4]
+ movu m7, [r4 + 16]
+ psadbw m6, m4
+ psadbw m7, m5
+ pshufd m7, m7, 84
+ paddd m6, m7
+ paddd m3, m6
+ lea r0, [r0 + FENC_STRIDE]
+ lea r1, [r1 + r5]
+ lea r2, [r2 + r5]
+ lea r3, [r3 + r5]
+ lea r4, [r4 + r5]
+ mova m4, [r0]
+ mova m5, [r0 + 16]
+ movu m6, [r1]
+ movu m7, [r1 + 16]
+ psadbw m6, m4
+ psadbw m7, m5
+ pshufd m7, m7, 84
+ paddd m6, m7
+ paddd m0, m6
+ movu m6, [r2]
+ movu m7, [r2 + 16]
+ psadbw m6, m4
+ psadbw m7, m5
+ pshufd m7, m7, 84
+ paddd m6, m7
+ paddd m1, m6
+ movu m6, [r3]
+ movu m7, [r3 + 16]
+ psadbw m6, m4
+ psadbw m7, m5
+ pshufd m7, m7, 84
+ paddd m6, m7
+ paddd m2, m6
+ movu m6, [r4]
+ movu m7, [r4 + 16]
+ psadbw m6, m4
+ psadbw m7, m5
+ pshufd m7, m7, 84
+ paddd m6, m7
+ paddd m3, m6
+ lea r0, [r0 + FENC_STRIDE]
+ lea r1, [r1 + r5]
+ lea r2, [r2 + r5]
+ lea r3, [r3 + r5]
+ lea r4, [r4 + r5]
+ mova m4, [r0]
+ mova m5, [r0 + 16]
+ movu m6, [r1]
+ movu m7, [r1 + 16]
+ psadbw m6, m4
+ psadbw m7, m5
+ pshufd m7, m7, 84
+ paddd m6, m7
+ paddd m0, m6
+ movu m6, [r2]
+ movu m7, [r2 + 16]
+ psadbw m6, m4
+ psadbw m7, m5
+ pshufd m7, m7, 84
+ paddd m6, m7
+ paddd m1, m6
+ movu m6, [r3]
+ movu m7, [r3 + 16]
+ psadbw m6, m4
+ psadbw m7, m5
+ pshufd m7, m7, 84
+ paddd m6, m7
+ paddd m2, m6
+ movu m6, [r4]
+ movu m7, [r4 + 16]
+ psadbw m6, m4
+ psadbw m7, m5
+ pshufd m7, m7, 84
+ paddd m6, m7
+ paddd m3, m6
+ lea r0, [r0 + FENC_STRIDE]
+ lea r1, [r1 + r5]
+ lea r2, [r2 + r5]
+ lea r3, [r3 + r5]
+ lea r4, [r4 + r5]
+ mova m4, [r0]
+ mova m5, [r0 + 16]
+ movu m6, [r1]
+ movu m7, [r1 + 16]
+ psadbw m6, m4
+ psadbw m7, m5
+ pshufd m7, m7, 84
+ paddd m6, m7
+ paddd m0, m6
+ movu m6, [r2]
+ movu m7, [r2 + 16]
+ psadbw m6, m4
+ psadbw m7, m5
+ pshufd m7, m7, 84
+ paddd m6, m7
+ paddd m1, m6
+ movu m6, [r3]
+ movu m7, [r3 + 16]
+ psadbw m6, m4
+ psadbw m7, m5
+ pshufd m7, m7, 84
+ paddd m6, m7
+ paddd m2, m6
+ movu m6, [r4]
+ movu m7, [r4 + 16]
+ psadbw m6, m4
+ psadbw m7, m5
+ pshufd m7, m7, 84
+ paddd m6, m7
+ paddd m3, m6
+ lea r0, [r0 + FENC_STRIDE]
+ lea r1, [r1 + r5]
+ lea r2, [r2 + r5]
+ lea r3, [r3 + r5]
+ lea r4, [r4 + r5]
+%endmacro
;-----------------------------------------------------------------------------
; void pixel_sad_x3_16x16( uint8_t *fenc, uint8_t *pix0, uint8_t *pix1,
;                          uint8_t *pix2, intptr_t i_stride, int scores[3] )
;-----------------------------------------------------------------------------
@@ -2136,6 +2278,26 @@
SAD_X3_END_SSE2 1
%endmacro
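+;-----------------------------------------------------------------------------
+; void pixel_sad_x4_24x32( uint8_t *fenc, uint8_t *pix0, uint8_t *pix1,
+;                          uint8_t *pix2, uint8_t *pix3, intptr_t i_stride,
+;                          int scores[4] )
+;-----------------------------------------------------------------------------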
+%macro SAD_X4_W24 0
+cglobal pixel_sad_x4_24x32, 6, 8, 8 ; 6 args in GPRs; 8 GPRs, 8 xmm regs
+ pxor m0, m0 ; SAD accumulator for pix0
+ pxor m1, m1 ; SAD accumulator for pix1
+ pxor m2, m2 ; SAD accumulator for pix2
+ pxor m3, m3 ; SAD accumulator for pix3
+ mov r7, 32 ; 32 rows in total, 16 per .loop iteration
+
+.loop:
+ SAD_X4_24x4
+ SAD_X4_24x4
+ SAD_X4_24x4
+ SAD_X4_24x4
+
+ sub r7, 16 ; four SAD_X4_24x4 calls consumed 16 rows
+ jnz .loop ; sub already set ZF; no separate cmp needed
+ SAD_X4_END_SSE2 1
+%endmacro
+
INIT_XMM sse2
SAD_X_SSE2 3, 16, 16, 7
SAD_X_SSE2 3, 16, 8, 7
@@ -2175,6 +2337,7 @@
SAD_X_SSE2 3, 16, 8, 7
SAD_X_SSE2 3, 8, 32, 7
SAD_X_SSE2 3, 8, 16, 7
+SAD_X4_W24
SAD_X_SSE2 4, 16, 64, 7
SAD_X_SSE2 4, 16, 32, 7
SAD_X_SSE2 4, 16, 16, 7
@@ -2193,6 +2356,7 @@
SAD_X_SSE2 3, 16, 12, 6
SAD_X_SSE2 3, 16, 8, 6
SAD_X_SSE2 3, 16, 4, 6
+SAD_X4_W24
SAD_X_SSE2 4, 16, 64, 7
SAD_X_SSE2 4, 16, 32, 7
SAD_X_SSE2 4, 16, 16, 7