[x265] Fwd: [PATCH] assembly code for pixel_sad_x3_24x32
Praveen Tiwari
praveen at multicorewareinc.com
Wed Oct 30 16:54:40 CET 2013
---------- Forwarded message ----------
From: <yuvaraj at multicorewareinc.com>
Date: Wed, Oct 30, 2013 at 2:38 PM
Subject: [x265] [PATCH] assembly code for pixel_sad_x3_24x32
To: x265-devel at videolan.org
# HG changeset patch
# User Yuvaraj Venkatesh <yuvaraj at multicorewareinc.com>
# Date 1383124045 -19800
# Wed Oct 30 14:37:25 2013 +0530
# Node ID eca1142d1cec9303afad71108494f9076586ce05
# Parent 65462024832b4498cd9f05a5a81cb6b559bf378b
assembly code for pixel_sad_x3_24x32
diff -r 65462024832b -r eca1142d1cec source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp Wed Oct 30 01:54:16 2013 -0500
+++ b/source/common/x86/asm-primitives.cpp Wed Oct 30 14:37:25 2013 +0530
@@ -292,6 +292,7 @@
p.sad_x4[LUMA_16x32] = x265_pixel_sad_x4_16x32_ssse3;
p.sad_x3[LUMA_16x64] = x265_pixel_sad_x3_16x64_ssse3;
p.sad_x4[LUMA_16x64] = x265_pixel_sad_x4_16x64_ssse3;
+ p.sad_x3[LUMA_24x32] = x265_pixel_sad_x3_24x32_ssse3;

p.luma_hvpp[LUMA_8x8] = x265_interp_8tap_hv_pp_8x8_ssse3;
p.ipfilter_sp[FILTER_V_S_P_8] = x265_interp_8tap_v_sp_ssse3;
@@ -325,6 +326,7 @@
p.sad_x4[LUMA_16x32] = x265_pixel_sad_x4_16x32_avx;
p.sad_x3[LUMA_16x64] = x265_pixel_sad_x3_16x64_avx;
p.sad_x4[LUMA_16x64] = x265_pixel_sad_x4_16x64_avx;
+ p.sad_x3[LUMA_24x32] = x265_pixel_sad_x3_24x32_avx;
}
if (cpuMask & X265_CPU_XOP)
{
diff -r 65462024832b -r eca1142d1cec source/common/x86/pixel.h
--- a/source/common/x86/pixel.h Wed Oct 30 01:54:16 2013 -0500
+++ b/source/common/x86/pixel.h Wed Oct 30 14:37:25 2013 +0530
@@ -47,6 +47,7 @@
ret x265_pixel_ ## name ## _32x24_ ## suffix args; \
ret x265_pixel_ ## name ## _32x32_ ## suffix args; \
ret x265_pixel_ ## name ## _32x64_ ## suffix args; \
+ ret x265_pixel_ ## name ## _24x32_ ## suffix args; \

#define DECL_X1(name, suffix) \
DECL_PIXELS(int, name, suffix, (pixel *, intptr_t, pixel *, intptr_t))
diff -r 65462024832b -r eca1142d1cec source/common/x86/sad-a.asm
--- a/source/common/x86/sad-a.asm Wed Oct 30 01:54:16 2013 -0500
+++ b/source/common/x86/sad-a.asm Wed Oct 30 14:37:25 2013 +0530
@@ -1988,6 +1988,117 @@
RET
%endmacro

+%macro SAD_X3_24x4 0
+ mova m3, [r0]
+ mova m4, [r0 + 16]
+ movu m5, [r1]
+ movu m6, [r1 + 16]
+ psadbw m5, m3
+ psadbw m6, m4
+ pshufd m6, m6, 84
+ paddd m5, m6
+ paddd m0, m5
+ movu m5, [r2]
+ movu m6, [r2 + 16]
+ psadbw m5, m3
+ psadbw m6, m4
+ pshufd m6, m6, 84
+ paddd m5, m6
+ paddd m1, m5
+ movu m5, [r3]
+ movu m6, [r3 + 16]
+ psadbw m5, m3
+ psadbw m6, m4
+ pshufd m6, m6, 84
+ paddd m5, m6
+ paddd m2, m5
+ lea r0, [r0 + FENC_STRIDE]
+ lea r1, [r1 + r4]
+ lea r2, [r2 + r4]
+ lea r3, [r3 + r4]
+ mova m3, [r0]
+ mova m4, [r0 + 16]
+ movu m5, [r1]
+ movu m6, [r1 + 16]
+ psadbw m5, m3
+ psadbw m6, m4
+ pshufd m6, m6, 84
+ paddd m5, m6
+ paddd m0, m5
+ movu m5, [r2]
+ movu m6, [r2 + 16]
+ psadbw m5, m3
+ psadbw m6, m4
+ pshufd m6, m6, 84
+ paddd m5, m6
+ paddd m1, m5
+ movu m5, [r3]
+ movu m6, [r3 + 16]
+ psadbw m5, m3
+ psadbw m6, m4
+ pshufd m6, m6, 84
+ paddd m5, m6
+ paddd m2, m5
>>+ lea r0, [r0 + FENC_STRIDE]
>>+ lea r1, [r1 + r4]
>>+ lea r2, [r2 + r4]
>>+ lea r3, [r3 + r4]
>>+ mova m3, [r0]
>>+ mova m4, [r0 + 16]
>>+ movu m5, [r1]
>>+ movu m6, [r1 + 16]
You don't need to reload the address with lea every time; you can compute it
directly in the addressing mode, like

    mova m4, [r0 + 2 * r4]
    mova m4, [r0 + 4 * r4]
    mova m4, [r0 + 8 * r4]

or even like

    mova m4, [r0 + 2 * r4 + constant]

Use this concept to eliminate the intermediate lea instructions. Scale
factors of 1, 2, 4 and 8 are allowed.
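For instance (an untested sketch of the idea, not the patch's code), two rows
of the r1 block can share one lea by folding the stride into the second row's
address:

    movu m5, [r1]                ; row 0
    movu m6, [r1 + 16]
    ...
    movu m5, [r1 + r4]           ; row 1, stride folded into the address
    movu m6, [r1 + r4 + 16]
    ...
    lea  r1, [r1 + 2 * r4]       ; one lea now advances two rows

With a spare declared register preloaded with 3 * r4 (e.g. lea rX, [r4 + 2 * r4],
computed once outside the loop; rX here is hypothetical and would have to be
declared in cglobal), four rows per lea become possible. For r0 it is even
simpler: FENC_STRIDE is a compile-time constant, so it can go straight into
the displacement, e.g. [r0 + 2 * FENC_STRIDE + 16].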
+ psadbw m5, m3
+ psadbw m6, m4
+ pshufd m6, m6, 84
+ paddd m5, m6
+ paddd m0, m5
+ movu m5, [r2]
+ movu m6, [r2 + 16]
+ psadbw m5, m3
+ psadbw m6, m4
+ pshufd m6, m6, 84
+ paddd m5, m6
+ paddd m1, m5
+ movu m5, [r3]
+ movu m6, [r3 + 16]
+ psadbw m5, m3
+ psadbw m6, m4
+ pshufd m6, m6, 84
+ paddd m5, m6
+ paddd m2, m5
+ lea r0, [r0 + FENC_STRIDE]
+ lea r1, [r1 + r4]
+ lea r2, [r2 + r4]
+ lea r3, [r3 + r4]
+ mova m3, [r0]
+ mova m4, [r0 + 16]
+ movu m5, [r1]
+ movu m6, [r1 + 16]
+ psadbw m5, m3
+ psadbw m6, m4
+ pshufd m6, m6, 84
+ paddd m5, m6
+ paddd m0, m5
+ movu m5, [r2]
+ movu m6, [r2 + 16]
+ psadbw m5, m3
+ psadbw m6, m4
+ pshufd m6, m6, 84
+ paddd m5, m6
+ paddd m1, m5
+ movu m5, [r3]
+ movu m6, [r3 + 16]
+ psadbw m5, m3
+ psadbw m6, m4
+ pshufd m6, m6, 84
+ paddd m5, m6
+ paddd m2, m5
+ lea r0, [r0 + FENC_STRIDE]
+ lea r1, [r1 + r4]
+ lea r2, [r2 + r4]
+ lea r3, [r3 + r4]
+%endmacro
+
;-----------------------------------------------------------------------------
; void pixel_sad_x3_16x16( uint8_t *fenc, uint8_t *pix0, uint8_t *pix1,
;                         uint8_t *pix2, intptr_t i_stride, int scores[3] )
@@ -2006,6 +2117,25 @@
%endif
%endmacro

+%macro SAD_X3_W24 0
+cglobal pixel_sad_x3_24x32, 5, 7, 8
+ pxor m0, m0
+ pxor m1, m1
+ pxor m2, m2
+ mov r6, 32
+
+.loop:
+ SAD_X3_24x4
+ SAD_X3_24x4
+ SAD_X3_24x4
+ SAD_X3_24x4
+
+ sub r6, 16
+ jnz .loop
+ SAD_X3_END_SSE2 1
+%endmacro
+
INIT_XMM sse2
SAD_X_SSE2 3, 16, 16, 7
SAD_X_SSE2 3, 16, 8, 7
@@ -2037,6 +2167,7 @@
%endmacro

INIT_XMM ssse3
+SAD_X3_W24
SAD_X_SSE2 3, 16, 64, 7
SAD_X_SSE2 3, 16, 32, 7
SAD_X_SSE2 3, 16, 16, 7
@@ -2055,6 +2186,7 @@
SAD_X_SSSE3 4, 8, 4

INIT_XMM avx
+SAD_X3_W24
SAD_X_SSE2 3, 16, 64, 7
SAD_X_SSE2 3, 16, 32, 6
SAD_X_SSE2 3, 16, 16, 6
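
For readers unfamiliar with the sad_x3 primitives, here is an illustrative
scalar sketch of what pixel_sad_x3_24x32 computes (my own illustration, not
code from the patch; the function name sad_x3_24x32_c is hypothetical):

    /* Sum of absolute differences of one 24x32 fenc block against three
     * candidate reference blocks that share a single stride.  FENC_STRIDE
     * is the fixed stride of the encoder's fenc buffer (64 in x265). */
    #include <stdint.h>
    #include <stdlib.h>

    #define FENC_STRIDE 64

    static void sad_x3_24x32_c(const uint8_t *fenc,
                               const uint8_t *pix0, const uint8_t *pix1,
                               const uint8_t *pix2, intptr_t stride,
                               int scores[3])
    {
        scores[0] = scores[1] = scores[2] = 0;
        for (int y = 0; y < 32; y++)       /* 32 rows */
        {
            for (int x = 0; x < 24; x++)   /* 24 columns */
            {
                scores[0] += abs(fenc[x] - pix0[x]);
                scores[1] += abs(fenc[x] - pix1[x]);
                scores[2] += abs(fenc[x] - pix2[x]);
            }
            fenc += FENC_STRIDE;
            pix0 += stride;
            pix1 += stride;
            pix2 += stride;
        }
    }

Doing the three SADs in one pass lets the assembly reuse the fenc loads
(m3/m4 above) across all three references, which is the point of the x3
variants.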