<div dir="ltr"><br><div class="gmail_extra"><br><br><div class="gmail_quote">On Mon, Nov 11, 2013 at 3:47 PM, <span dir="ltr"><<a href="mailto:murugan@multicorewareinc.com" target="_blank">murugan@multicorewareinc.com</a>></span> wrote:<br>
<blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex"># HG changeset patch<br>
# User Murugan Vairavel <<a href="mailto:murugan@multicorewareinc.com">murugan@multicorewareinc.com</a>><br>
# Date 1384206405 -19800<br>
# Tue Nov 12 03:16:45 2013 +0530<br>
# Node ID 85749f42e3fa7c03ce29903d1457dc0ed873c120<br>
# Parent 9642b5b6500b5553ab3ce70a360aaaadad5d7234<br>
asm: pixelsub_ps routine for all block sizes<br></blockquote><div><br></div><div>I have this patch sitting in my patch queue, waiting for the change to asm-primitives.cpp and other files that will make the encoder actually use these new functions; and remove the old functions.</div>
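Until that asm-primitives.cpp hookup lands, the contract these routines are written against can be summarised with a scalar sketch: each block-size variant widens the 8-bit pixels of src0 and src1 to 16 bits and stores src0 - src1 into dest, with deststride given in int16_t units (which is why the assembly doubles r1 with "add r1, r1" before using it as a byte offset). A minimal C++ illustration follows; it assumes an 8-bit pixel build, and the helper name and the explicit bx/by parameters are mine, not x265's actual C reference:

    #include <cstdint>

    typedef uint8_t pixel; // assuming an 8-bit pixel build

    // Scalar sketch of pixel_sub_ps for a bx x by block: widen both sources
    // to 16 bits and store src0 - src1. deststride is in int16_t units.
    void pixel_sub_ps_ref(int16_t* dest, intptr_t deststride,
                          const pixel* src0, const pixel* src1,
                          intptr_t srcstride0, intptr_t srcstride1,
                          int bx, int by)
    {
        for (int y = 0; y < by; y++)
        {
            for (int x = 0; x < bx; x++)
                dest[x] = (int16_t)(src0[x] - src1[x]);

            dest += deststride;
            src0 += srcstride0;
            src1 += srcstride1;
        }
    }

Calling this with bx = 16, by = 16, for example, mirrors what the 16x16 SSE4 variant in the diff below computes; it is meant only to clarify the expected behaviour, not to stand in for the existing C primitive.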
diff -r 9642b5b6500b -r 85749f42e3fa source/common/x86/pixel-a.asm<br>
--- a/source/common/x86/pixel-a.asm Mon Nov 11 17:41:32 2013 +0800<br>
+++ b/source/common/x86/pixel-a.asm Tue Nov 12 03:16:45 2013 +0530<br>
@@ -5284,3 +5284,1060 @@<br>
jl .loop<br>
movifnidn eax, r0d<br>
RET<br>
+<br>
+;-----------------------------------------------------------------------------<br>
+; void pixel_sub_ps_c_2x4(int16_t *dest, intptr_t destride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);<br>
+;-----------------------------------------------------------------------------<br>
+INIT_XMM sse4<br>
+cglobal pixel_sub_ps_2x4, 6, 6, 4, dest, deststride, src0, src1, srcstride0, srcstride1<br>
+<br>
+add r1, r1<br>
+<br>
+movd m0, [r2]<br>
+pinsrw m0, [r2 + r4], 2<br>
+movd m1, [r2 + 2 * r4]<br>
+<br>
+movd m2, [r3]<br>
+pinsrw m2, [r3 + r5], 2<br>
+movd m3, [r3 + 2 * r5]<br>
+<br>
+lea r2, [r2 + 2 * r4]<br>
+lea r3, [r3 + 2 * r5]<br>
+<br>
+pinsrw m1, [r2 + r4], 2<br>
+pinsrw m3, [r3 + r5], 2<br>
+<br>
+pmovzxbw m0, m0<br>
+pmovzxbw m1, m1<br>
+pmovzxbw m2, m2<br>
+pmovzxbw m3, m3<br>
+<br>
+psubw m0, m2<br>
+psubw m1, m3<br>
+<br>
+movd [r0], m0<br>
+pextrd [r0 + r1], m0, 2<br>
+movd [r0 + 2* r1], m1<br>
+<br>
+lea r0, [r0 + 2 * r1]<br>
+<br>
+pextrd [r0 + r1], m1, 2<br>
+<br>
+RET<br>
+<br>
+;-----------------------------------------------------------------------------<br>
+; void pixel_sub_ps_c_%1x%2(int16_t *dest, intptr_t destride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);<br>
+;-----------------------------------------------------------------------------<br>
+%macro PIXELSUB_PS_W2_H4 2<br>
+INIT_XMM sse4<br>
+cglobal pixel_sub_ps_%1x%2, 6, 7, 4, dest, deststride, src0, src1, srcstride0, srcstride1<br>
+<br>
+add r1, r1<br>
+mov r6d, %2/4<br>
+<br>
+.loop<br>
+<br>
+ movd m0, [r2]<br>
+ pinsrw m0, [r2 + r4], 2<br>
+ movd m1, [r2 + 2 * r4]<br>
+<br>
+ movd m2, [r3]<br>
+ pinsrw m2, [r3 + r5], 2<br>
+ movd m3, [r3 + 2 * r5]<br>
+<br>
+ lea r2, [r2 + 2 * r4]<br>
+ lea r3, [r3 + 2 * r5]<br>
+<br>
+ pinsrw m1, [r2 + r4], 2<br>
+ pinsrw m3, [r3 + r5], 2<br>
+<br>
+ pmovzxbw m0, m0<br>
+ pmovzxbw m1, m1<br>
+ pmovzxbw m2, m2<br>
+ pmovzxbw m3, m3<br>
+<br>
+ psubw m0, m2<br>
+ psubw m1, m3<br>
+<br>
+ movd [r0], m0<br>
+ pextrd [r0 + r1], m0, 2<br>
+ movd [r0 + 2* r1], m1<br>
+<br>
+ lea r0, [r0 + 2 * r1]<br>
+<br>
+ pextrd [r0 + r1], m1, 2<br>
+<br>
+ lea r2, [r2 + 2 * r4]<br>
+ lea r3, [r3 + 2 * r5]<br>
+ lea r0, [r0 + 2 * r1]<br>
+<br>
+ dec r6d<br>
+<br>
+jnz .loop<br>
+<br>
+RET<br>
+%endmacro<br>
+<br>
+PIXELSUB_PS_W2_H4 2, 8<br>
+<br>
+;-----------------------------------------------------------------------------<br>
+; void pixel_sub_ps_c_4x2(int16_t *dest, intptr_t destride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);<br>
+;-----------------------------------------------------------------------------<br>
+INIT_XMM sse4<br>
+cglobal pixel_sub_ps_4x2, 6, 6, 4, dest, deststride, src0, src1, srcstride0, srcstride1<br>
+<br>
+add r1, r1<br>
+<br>
+movd m0, [r2]<br>
+movd m1, [r3]<br>
+<br>
+movd m2, [r2 + r4]<br>
+movd m3, [r3 + r5]<br>
+<br>
+punpckldq m0, m2<br>
+punpckldq m1, m3<br>
+pmovzxbw m0, m0<br>
+pmovzxbw m1, m1<br>
+<br>
+psubw m0, m1<br>
+<br>
+movlps [r0], m0<br>
+movhps [r0 + r1], m0<br>
+<br>
+RET<br>
+<br>
+;-----------------------------------------------------------------------------<br>
+; void pixel_sub_ps_c_4x4(int16_t *dest, intptr_t destride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);<br>
+;-----------------------------------------------------------------------------<br>
+INIT_XMM sse4<br>
+cglobal pixel_sub_ps_4x4, 6, 6, 8, dest, deststride, src0, src1, srcstride0, srcstride1<br>
+<br>
+add r1, r1<br>
+<br>
+movd m0, [r2]<br>
+movd m1, [r3]<br>
+<br>
+movd m2, [r2 + r4]<br>
+movd m3, [r3 + r5]<br>
+<br>
+movd m4, [r2 + 2 * r4]<br>
+movd m5, [r3 + 2 * r5]<br>
+<br>
+lea r2, [r2 + 2 * r4]<br>
+lea r3, [r3 + 2 * r5]<br>
+<br>
+movd m6, [r2 + r4]<br>
+movd m7, [r3 + r5]<br>
+<br>
+punpckldq m0, m2<br>
+punpckldq m1, m3<br>
+punpckldq m4, m6<br>
+punpckldq m5, m7<br>
+<br>
+pmovzxbw m0, m0<br>
+pmovzxbw m1, m1<br>
+pmovzxbw m4, m4<br>
+pmovzxbw m5, m5<br>
+<br>
+psubw m0, m1<br>
+psubw m4, m5<br>
+<br>
+movlps [r0], m0<br>
+movhps [r0 + r1], m0<br>
+movlps [r0 + 2 * r1], m4<br>
+<br>
+lea r0, [r0 + 2 * r1]<br>
+<br>
+movhps [r0 + r1], m4<br>
+<br>
+RET<br>
+<br>
+;-----------------------------------------------------------------------------<br>
+; void pixel_sub_ps_c_%1x%2(int16_t *dest, intptr_t destride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);<br>
+;-----------------------------------------------------------------------------<br>
+%macro PIXELSUB_PS_W4_H4 2<br>
+INIT_XMM sse4<br>
+cglobal pixel_sub_ps_%1x%2, 6, 7, 8, dest, deststride, src0, src1, srcstride0, srcstride1<br>
+<br>
+add r1, r1<br>
+mov r6d, %2/4<br>
+<br>
+.loop<br>
+<br>
+ movd m0, [r2]<br>
+ movd m1, [r3]<br>
+<br>
+ movd m2, [r2 + r4]<br>
+ movd m3, [r3 + r5]<br>
+<br>
+ movd m4, [r2 + 2 * r4]<br>
+ movd m5, [r3 + 2 * r5]<br>
+<br>
+ lea r2, [r2 + 2 * r4]<br>
+ lea r3, [r3 + 2 * r5]<br>
+<br>
+ movd m6, [r2 + r4]<br>
+ movd m7, [r3 + r5]<br>
+<br>
+ punpckldq m0, m2<br>
+ punpckldq m1, m3<br>
+ punpckldq m4, m6<br>
+ punpckldq m5, m7<br>
+<br>
+ pmovzxbw m0, m0<br>
+ pmovzxbw m1, m1<br>
+ pmovzxbw m4, m4<br>
+ pmovzxbw m5, m5<br>
+<br>
+ psubw m0, m1<br>
+ psubw m4, m5<br>
+<br>
+ movlps [r0], m0<br>
+ movhps [r0 + r1], m0<br>
+ movlps [r0 + 2 * r1], m4<br>
+<br>
+ lea r0, [r0 + 2 * r1]<br>
+<br>
+ movhps [r0 + r1], m4<br>
+<br>
+ lea r2, [r2 + 2 * r4]<br>
+ lea r3, [r3 + 2 * r5]<br>
+ lea r0, [r0 + 2 * r1]<br>
+<br>
+ dec r6d<br>
+<br>
+jnz .loop<br>
+<br>
+RET<br>
+%endmacro<br>
+<br>
+PIXELSUB_PS_W4_H4 4, 8<br>
+PIXELSUB_PS_W4_H4 4, 16<br>
+<br>
+;-----------------------------------------------------------------------------<br>
+; void pixel_sub_ps_c_%1x%2(int16_t *dest, intptr_t destride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);<br>
+;-----------------------------------------------------------------------------<br>
+%macro PIXELSUB_PS_W6_H4 2<br>
+INIT_XMM sse4<br>
+cglobal pixel_sub_ps_%1x%2, 6, 7, 8, dest, deststride, src0, src1, srcstride0, srcstride1<br>
+<br>
+add r1, r1<br>
+mov r6d, %2/4<br>
+<br>
+.loop<br>
+<br>
+ movh m0, [r2]<br>
+ movh m1, [r3]<br>
+<br>
+ movh m2, [r2 + r4]<br>
+ movh m3, [r3 + r5]<br>
+<br>
+ movh m4, [r2 + 2 * r4]<br>
+ movh m5, [r3 + 2 * r5]<br>
+<br>
+ lea r2, [r2 + 2 * r4]<br>
+ lea r3, [r3 + 2 * r5]<br>
+<br>
+ movh m6, [r2 + r4]<br>
+ movh m7, [r3 + r5]<br>
+<br>
+ pmovzxbw m0, m0<br>
+ pmovzxbw m1, m1<br>
+ pmovzxbw m2, m2<br>
+ pmovzxbw m3, m3<br>
+ pmovzxbw m4, m4<br>
+ pmovzxbw m5, m5<br>
+ pmovzxbw m6, m6<br>
+ pmovzxbw m7, m7<br>
+<br>
+ psubw m0, m1<br>
+ psubw m2, m3<br>
+ psubw m4, m5<br>
+ psubw m6, m7<br>
+<br>
+ movh [r0], m0<br>
+ pextrd [r0 + 8], m0, 2<br>
+ movh [r0 + r1], m2<br>
+ pextrd [r0 + r1 + 8], m2, 2<br>
+ movh [r0 + 2* r1], m4<br>
+ pextrd [r0 + 2 * r1 + 8], m4, 2<br>
+<br>
+ lea r0, [r0 + 2 * r1]<br>
+<br>
+ movh [r0 + r1], m6<br>
+ pextrd [r0 + r1 + 8], m6, 2<br>
+<br>
+ lea r2, [r2 + 2 * r4]<br>
+ lea r3, [r3 + 2 * r5]<br>
+ lea r0, [r0 + 2 * r1]<br>
+<br>
+ dec r6d<br>
+<br>
+jnz .loop<br>
+<br>
+RET<br>
+%endmacro<br>
+<br>
+PIXELSUB_PS_W6_H4 6, 8<br>
+<br>
+;-----------------------------------------------------------------------------<br>
+; void pixel_sub_ps_c_8x2(int16_t *dest, intptr_t destride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);<br>
+;-----------------------------------------------------------------------------<br>
+INIT_XMM sse4<br>
+cglobal pixel_sub_ps_8x2, 6, 6, 4, dest, deststride, src0, src1, srcstride0, srcstride1<br>
+<br>
+add r1, r1<br>
+<br>
+movh m0, [r2]<br>
+movh m1, [r3]<br>
+pmovzxbw m0, m0<br>
+pmovzxbw m1, m1<br>
+<br>
+movh m2, [r2 + r4]<br>
+movh m3, [r3 + r5]<br>
+pmovzxbw m2, m2<br>
+pmovzxbw m3, m3<br>
+<br>
+psubw m0, m1<br>
+psubw m2, m3<br>
+<br>
+movu [r0], m0<br>
+movu [r0 + r1], m2<br>
+<br>
+RET<br>
+<br>
+;-----------------------------------------------------------------------------<br>
+; void pixel_sub_ps_c_8x4(int16_t *dest, intptr_t destride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);<br>
+;-----------------------------------------------------------------------------<br>
+INIT_XMM sse4<br>
+cglobal pixel_sub_ps_8x4, 6, 6, 8, dest, deststride, src0, src1, srcstride0, srcstride1<br>
+<br>
+add r1, r1<br>
+<br>
+movh m0, [r2]<br>
+movh m1, [r3]<br>
+pmovzxbw m0, m0<br>
+pmovzxbw m1, m1<br>
+<br>
+movh m2, [r2 + r4]<br>
+movh m3, [r3 + r5]<br>
+pmovzxbw m2, m2<br>
+pmovzxbw m3, m3<br>
+<br>
+movh m4, [r2 + 2 * r4]<br>
+movh m5, [r3 + 2 * r5]<br>
+pmovzxbw m4, m4<br>
+pmovzxbw m5, m5<br>
+<br>
+psubw m0, m1<br>
+psubw m2, m3<br>
+psubw m4, m5<br>
+<br>
+lea r2, [r2 + 2 * r4]<br>
+lea r3, [r3 + 2 * r5]<br>
+<br>
+movh m6, [r2 + r4]<br>
+movh m7, [r3 + r5]<br>
+pmovzxbw m6, m6<br>
+pmovzxbw m7, m7<br>
+<br>
+psubw m6, m7<br>
+<br>
+movu [r0], m0<br>
+movu [r0 + r1], m2<br>
+movu [r0 + 2 * r1], m4<br>
+<br>
+lea r0, [r0 + 2 * r1]<br>
+<br>
+movu [r0 + r1], m6<br>
+<br>
+RET<br>
+<br>
+;-----------------------------------------------------------------------------<br>
+; void pixel_sub_ps_c_8x6(int16_t *dest, intptr_t destride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);<br>
+;-----------------------------------------------------------------------------<br>
+INIT_XMM sse4<br>
+cglobal pixel_sub_ps_8x6, 6, 6, 8, dest, deststride, src0, src1, srcstride0, srcstride1<br>
+<br>
+add r1, r1<br>
+<br>
+movh m0, [r2]<br>
+movh m1, [r3]<br>
+pmovzxbw m0, m0<br>
+pmovzxbw m1, m1<br>
+<br>
+movh m2, [r2 + r4]<br>
+movh m3, [r3 + r5]<br>
+pmovzxbw m2, m2<br>
+pmovzxbw m3, m3<br>
+<br>
+movh m4, [r2 + 2 * r4]<br>
+movh m5, [r3 + 2 * r5]<br>
+pmovzxbw m4, m4<br>
+pmovzxbw m5, m5<br>
+<br>
+psubw m0, m1<br>
+psubw m2, m3<br>
+psubw m4, m5<br>
+<br>
+lea r2, [r2 + 2 * r4]<br>
+lea r3, [r3 + 2 * r5]<br>
+<br>
+movh m6, [r2 + r4]<br>
+movh m7, [r3 + r5]<br>
+pmovzxbw m6, m6<br>
+pmovzxbw m7, m7<br>
+<br>
+movh m1, [r2 + 2 * r4]<br>
+movh m3, [r3 + 2 * r5]<br>
+pmovzxbw m1, m1<br>
+pmovzxbw m3, m3<br>
+<br>
+psubw m6, m7<br>
+psubw m1, m3<br>
+<br>
+lea r2, [r2 + 2 * r4]<br>
+lea r3, [r3 + 2 * r5]<br>
+<br>
+movh m3, [r2 + r4]<br>
+movh m5, [r3 + r5]<br>
+pmovzxbw m3, m3<br>
+pmovzxbw m5, m5<br>
+<br>
+psubw m3, m5<br>
+<br>
+movu [r0], m0<br>
+movu [r0 + r1], m2<br>
+movu [r0 + 2 * r1], m4<br>
+<br>
+lea r0, [r0 + 2 * r1]<br>
+<br>
+movu [r0 + r1], m6<br>
+movu [r0 + 2 * r1], m1<br>
+<br>
+lea r0, [r0 + 2 * r1]<br>
+<br>
+movu [r0 + r1], m3<br>
+<br>
+RET<br>
+<br>
+;-----------------------------------------------------------------------------<br>
+; void pixel_sub_ps_c_%1x%2(int16_t *dest, intptr_t destride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);<br>
+;-----------------------------------------------------------------------------<br>
+%macro PIXELSUB_PS_W8_H4 2<br>
+INIT_XMM sse4<br>
+cglobal pixel_sub_ps_%1x%2, 6, 7, 8, dest, deststride, src0, src1, srcstride0, srcstride1<br>
+<br>
+add r1, r1<br>
+mov r6d, %2/4<br>
+<br>
+.loop<br>
+<br>
+ movh m0, [r2]<br>
+ movh m1, [r3]<br>
+ pmovzxbw m0, m0<br>
+ pmovzxbw m1, m1<br>
+<br>
+ movh m2, [r2 + r4]<br>
+ movh m3, [r3 + r5]<br>
+ pmovzxbw m2, m2<br>
+ pmovzxbw m3, m3<br>
+<br>
+ movh m4, [r2 + 2 * r4]<br>
+ movh m5, [r3 + 2 * r5]<br>
+ pmovzxbw m4, m4<br>
+ pmovzxbw m5, m5<br>
+<br>
+ psubw m0, m1<br>
+ psubw m2, m3<br>
+ psubw m4, m5<br>
+<br>
+ lea r2, [r2 + 2 * r4]<br>
+ lea r3, [r3 + 2 * r5]<br>
+<br>
+ movh m6, [r2 + r4]<br>
+ movh m7, [r3 + r5]<br>
+ pmovzxbw m6, m6<br>
+ pmovzxbw m7, m7<br>
+<br>
+ psubw m6, m7<br>
+<br>
+ movu [r0], m0<br>
+ movu [r0 + r1], m2<br>
+ movu [r0 + 2 * r1], m4<br>
+<br>
+ lea r0, [r0 + 2 * r1]<br>
+<br>
+ movu [r0 + r1], m6<br>
+<br>
+ lea r2, [r2 + 2 * r4]<br>
+ lea r3, [r3 + 2 * r5]<br>
+ lea r0, [r0 + 2 * r1]<br>
+<br>
+ dec r6d<br>
+<br>
+jnz .loop<br>
+<br>
+RET<br>
+%endmacro<br>
+<br>
+PIXELSUB_PS_W8_H4 8, 8<br>
+PIXELSUB_PS_W8_H4 8, 16<br>
+PIXELSUB_PS_W8_H4 8, 32<br>
+<br>
+;-----------------------------------------------------------------------------<br>
+; void pixel_sub_ps_c_%1x%2(int16_t *dest, intptr_t destride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);<br>
+;-----------------------------------------------------------------------------<br>
+%macro PIXELSUB_PS_W12_H4 2<br>
+INIT_XMM sse4<br>
+cglobal pixel_sub_ps_%1x%2, 6, 7, 6, dest, deststride, src0, src1, srcstride0, srcstride1<br>
+<br>
+add r1, r1<br>
+mov r6d, %2/4<br>
+<br>
+.loop<br>
+<br>
+ movu m0, [r2]<br>
+ movu m1, [r3]<br>
+ movu m2, [r2 + r4]<br>
+ movu m3, [r3 + r5]<br>
+<br>
+ mova m4, m0<br>
+ mova m5, m1<br>
+ punpckhdq m4, m2<br>
+ punpckhdq m5, m3<br>
+<br>
+ pmovzxbw m0, m0<br>
+ pmovzxbw m1, m1<br>
+ pmovzxbw m2, m2<br>
+ pmovzxbw m3, m3<br>
+ pmovzxbw m4, m4<br>
+ pmovzxbw m5, m5<br>
+<br>
+ psubw m0, m1<br>
+ psubw m2, m3<br>
+ psubw m4, m5<br>
+<br>
+ movu [r0], m0<br>
+ movlps [r0 + 16], m4<br>
+ movu [r0 + r1], m2<br>
+ movhps [r0 + r1 + 16], m4<br>
+<br>
+ movu m0, [r2 + 2 * r4]<br>
+ movu m1, [r3 + 2 * r5]<br>
+<br>
+ lea r2, [r2 + 2 * r4]<br>
+ lea r3, [r3 + 2 * r5]<br>
+<br>
+ movu m2, [r2 + r4]<br>
+ movu m3, [r3 + r5]<br>
+<br>
+ mova m4, m0<br>
+ mova m5, m1<br>
+ punpckhdq m4, m2<br>
+ punpckhdq m5, m3<br>
+<br>
+ pmovzxbw m0, m0<br>
+ pmovzxbw m1, m1<br>
+ pmovzxbw m2, m2<br>
+ pmovzxbw m3, m3<br>
+ pmovzxbw m4, m4<br>
+ pmovzxbw m5, m5<br>
+<br>
+ psubw m0, m1<br>
+ psubw m2, m3<br>
+ psubw m4, m5<br>
+<br>
+ movu [r0 + 2 * r1], m0<br>
+ movlps [r0 + 2 * r1 + 16], m4<br>
+<br>
+ lea r0, [r0 + 2 * r1]<br>
+<br>
+ movu [r0 + r1], m2<br>
+ movhps [r0 + r1 + 16], m4<br>
+<br>
+ lea r2, [r2 + 2 * r4]<br>
+ lea r3, [r3 + 2 * r5]<br>
+ lea r0, [r0 + 2 * r1]<br>
+<br>
+ dec r6d<br>
+<br>
+jnz .loop<br>
+<br>
+RET<br>
+%endmacro<br>
+<br>
+PIXELSUB_PS_W12_H4 12, 16<br>
+<br>
+;-----------------------------------------------------------------------------<br>
+; void pixel_sub_ps_c_%1x%2(int16_t *dest, intptr_t destride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);<br>
+;-----------------------------------------------------------------------------<br>
+%macro PIXELSUB_PS_W16_H4 2<br>
+INIT_XMM sse4<br>
+cglobal pixel_sub_ps_%1x%2, 6, 7, 7, dest, deststride, src0, src1, srcstride0, srcstride1<br>
+<br>
+add r1, r1<br>
+mov r6d, %2/4<br>
+pxor m6, m6<br>
+<br>
+.loop<br>
+<br>
+ movu m1, [r2]<br>
+ pmovzxbw m0, m1<br>
+ punpckhbw m1, m6<br>
+ movu m3, [r3]<br>
+ pmovzxbw m2, m3<br>
+ punpckhbw m3, m6<br>
+<br>
+ psubw m0, m2<br>
+ psubw m1, m3<br>
+<br>
+ movu m5, [r2 + r4]<br>
+ pmovzxbw m4, m5<br>
+ punpckhbw m5, m6<br>
+ movu m3, [r3 + r5]<br>
+ pmovzxbw m2, m3<br>
+ punpckhbw m3, m6<br>
+<br>
+ psubw m4, m2<br>
+ psubw m5, m3<br>
+<br>
+ movu [r0], m0<br>
+ movu [r0 + 16], m1<br>
+ movu [r0 + r1], m4<br>
+ movu [r0 + r1 + 16], m5<br>
+<br>
+ movu m1, [r2 + 2 * r4]<br>
+ pmovzxbw m0, m1<br>
+ punpckhbw m1, m6<br>
+ movu m3, [r3 + 2 * r5]<br>
+ pmovzxbw m2, m3<br>
+ punpckhbw m3, m6<br>
+<br>
+ lea r2, [r2 + 2 * r4]<br>
+ lea r3, [r3 + 2 * r5]<br>
+<br>
+ psubw m0, m2<br>
+ psubw m1, m3<br>
+<br>
+ movu m5, [r2 + r4]<br>
+ pmovzxbw m4, m5<br>
+ punpckhbw m5, m6<br>
+ movu m3, [r3 + r5]<br>
+ pmovzxbw m2, m3<br>
+ punpckhbw m3, m6<br>
+<br>
+ psubw m4, m2<br>
+ psubw m5, m3<br>
+<br>
+ movu [r0 + 2 * r1], m0<br>
+ movu [r0 + 2 * r1 + 16], m1<br>
+<br>
+ lea r0, [r0 + 2 * r1]<br>
+<br>
+ movu [r0 + r1], m4<br>
+ movu [r0 + r1 + 16], m5<br>
+<br>
+ lea r2, [r2 + 2 * r4]<br>
+ lea r3, [r3 + 2 * r5]<br>
+ lea r0, [r0 + 2 * r1]<br>
+<br>
+ dec r6d<br>
+<br>
+jnz .loop<br>
+<br>
+RET<br>
+%endmacro<br>
+<br>
+PIXELSUB_PS_W16_H4 16, 4<br>
+PIXELSUB_PS_W16_H4 16, 8<br>
+PIXELSUB_PS_W16_H4 16, 12<br>
+PIXELSUB_PS_W16_H4 16, 16<br>
+PIXELSUB_PS_W16_H4 16, 32<br>
+PIXELSUB_PS_W16_H4 16, 64<br>
+<br>
+;-----------------------------------------------------------------------------<br>
+; void pixel_sub_ps_c_%1x%2(int16_t *dest, intptr_t destride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);<br>
+;-----------------------------------------------------------------------------<br>
+%macro PIXELSUB_PS_W24_H2 2<br>
+INIT_XMM sse4<br>
+cglobal pixel_sub_ps_%1x%2, 6, 7, 7, dest, deststride, src0, src1, srcstride0, srcstride1<br>
+<br>
+add r1, r1<br>
+mov r6d, %2/2<br>
+pxor m6, m6<br>
+<br>
+.loop<br>
+<br>
+ movu m1, [r2]<br>
+ pmovzxbw m0, m1<br>
+ punpckhbw m1, m6<br>
+ movh m2, [r2 + 16]<br>
+ pmovzxbw m2, m2<br>
+ movu m4, [r3]<br>
+ pmovzxbw m3, m4<br>
+ punpckhbw m4, m6<br>
+ movh m5, [r3 + 16]<br>
+ pmovzxbw m5, m5<br>
+<br>
+ psubw m0, m3<br>
+ psubw m1, m4<br>
+ psubw m2, m5<br>
+<br>
+ movu [r0], m0<br>
+ movu [r0 + 16], m1<br>
+ movu [r0 + 32], m2<br>
+<br>
+ movu m1, [r2 + r4]<br>
+ pmovzxbw m0, m1<br>
+ punpckhbw m1, m6<br>
+ movh m2, [r2 + r4 + 16]<br>
+ pmovzxbw m2, m2<br>
+ movu m4, [r3 + r5]<br>
+ pmovzxbw m3, m4<br>
+ punpckhbw m4, m6<br>
+ movh m5, [r3 + r5 + 16]<br>
+ pmovzxbw m5, m5<br>
+<br>
+ psubw m0, m3<br>
+ psubw m1, m4<br>
+ psubw m2, m5<br>
+<br>
+ movu [r0 + r1], m0<br>
+ movu [r0 + r1 + 16], m1<br>
+ movu [r0 + r1 + 32], m2<br>
+<br>
+ lea r2, [r2 + 2 * r4]<br>
+ lea r3, [r3 + 2 * r5]<br>
+ lea r0, [r0 + 2 * r1]<br>
+<br>
+ dec r6d<br>
+<br>
+jnz .loop<br>
+<br>
+RET<br>
+%endmacro<br>
+<br>
+PIXELSUB_PS_W24_H2 24, 32<br>
+<br>
+;-----------------------------------------------------------------------------<br>
+; void pixel_sub_ps_c_%1x%2(int16_t *dest, intptr_t destride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);<br>
+;-----------------------------------------------------------------------------<br>
+%macro PIXELSUB_PS_W32_H2 2<br>
+INIT_XMM sse4<br>
+cglobal pixel_sub_ps_%1x%2, 6, 7, 8, dest, deststride, src0, src1, srcstride0, srcstride1<br>
+<br>
+add r1, r1<br>
+mov r6d, %2/2<br>
+<br>
+.loop<br>
+<br>
+ movh m0, [r2]<br>
+ movh m1, [r2 + 8]<br>
+ movh m2, [r2 + 16]<br>
+ movh m3, [r2 + 24]<br>
+ movh m4, [r3]<br>
+ movh m5, [r3 + 8]<br>
+ movh m6, [r3 + 16]<br>
+ movh m7, [r3 + 24]<br>
+<br>
+ pmovzxbw m0, m0<br>
+ pmovzxbw m1, m1<br>
+ pmovzxbw m2, m2<br>
+ pmovzxbw m3, m3<br>
+ pmovzxbw m4, m4<br>
+ pmovzxbw m5, m5<br>
+ pmovzxbw m6, m6<br>
+ pmovzxbw m7, m7<br>
+<br>
+ psubw m0, m4<br>
+ psubw m1, m5<br>
+ psubw m2, m6<br>
+ psubw m3, m7<br>
+<br>
+ movu [r0], m0<br>
+ movu [r0 + 16], m1<br>
+ movu [r0 + 32], m2<br>
+ movu [r0 + 48], m3<br>
+<br>
+ movh m0, [r2 + r4]<br>
+ movh m1, [r2 + r4 + 8]<br>
+ movh m2, [r2 + r4 + 16]<br>
+ movh m3, [r2 + r4 + 24]<br>
+ movh m4, [r3 + r5]<br>
+ movh m5, [r3 + r5 + 8]<br>
+ movh m6, [r3 + r5 + 16]<br>
+ movh m7, [r3 + r5 + 24]<br>
+<br>
+ pmovzxbw m0, m0<br>
+ pmovzxbw m1, m1<br>
+ pmovzxbw m2, m2<br>
+ pmovzxbw m3, m3<br>
+ pmovzxbw m4, m4<br>
+ pmovzxbw m5, m5<br>
+ pmovzxbw m6, m6<br>
+ pmovzxbw m7, m7<br>
+<br>
+ psubw m0, m4<br>
+ psubw m1, m5<br>
+ psubw m2, m6<br>
+ psubw m3, m7<br>
+<br>
+ movu [r0 + r1], m0<br>
+ movu [r0 + r1 + 16], m1<br>
+ movu [r0 + r1 + 32], m2<br>
+ movu [r0 + r1 + 48], m3<br>
+<br>
+ lea r2, [r2 + 2 * r4]<br>
+ lea r3, [r3 + 2 * r5]<br>
+ lea r0, [r0 + 2 * r1]<br>
+<br>
+ dec r6d<br>
+<br>
+jnz .loop<br>
+<br>
+RET<br>
+%endmacro<br>
+<br>
+PIXELSUB_PS_W32_H2 32, 8<br>
+PIXELSUB_PS_W32_H2 32, 16<br>
+PIXELSUB_PS_W32_H2 32, 24<br>
+PIXELSUB_PS_W32_H2 32, 32<br>
+PIXELSUB_PS_W32_H2 32, 64<br>
+<br>
+;-----------------------------------------------------------------------------<br>
+; void pixel_sub_ps_c_%1x%2(int16_t *dest, intptr_t destride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);<br>
+;-----------------------------------------------------------------------------<br>
+%macro PIXELSUB_PS_W48_H2 2<br>
+INIT_XMM sse4<br>
+cglobal pixel_sub_ps_%1x%2, 6, 7, 7, dest, deststride, src0, src1, srcstride0, srcstride1<br>
+<br>
+add r1, r1<br>
+mov r6d, %2/2<br>
+pxor m6, m6<br>
+<br>
+.loop<br>
+<br>
+ movu m1, [r2]<br>
+ pmovzxbw m0, m1<br>
+ punpckhbw m1, m6<br>
+ movu m3, [r3]<br>
+ pmovzxbw m2, m3<br>
+ punpckhbw m3, m6<br>
+ movu m5, [r2 + 16]<br>
+ pmovzxbw m4, m5<br>
+ punpckhbw m5, m6<br>
+<br>
+ psubw m0, m2<br>
+ psubw m1, m3<br>
+<br>
+ movu [r0], m0<br>
+ movu [r0 + 16], m1<br>
+<br>
+ movu m3, [r3 + 16]<br>
+ pmovzxbw m2, m3<br>
+ punpckhbw m3, m6<br>
+<br>
+ psubw m4, m2<br>
+ psubw m5, m3<br>
+<br>
+ movu [r0 + 32], m4<br>
+ movu [r0 + 48], m5<br>
+<br>
+ movu m1, [r2 + 32]<br>
+ pmovzxbw m0, m1<br>
+ punpckhbw m1, m6<br>
+ movu m3, [r3 + 32]<br>
+ pmovzxbw m2, m3<br>
+ punpckhbw m3, m6<br>
+<br>
+ psubw m0, m2<br>
+ psubw m1, m3<br>
+<br>
+ movu [r0 + 64], m0<br>
+ movu [r0 + 80], m1<br>
+<br>
+ movu m1, [r2 + r4]<br>
+ pmovzxbw m0, m1<br>
+ punpckhbw m1, m6<br>
+ movu m3, [r3 + r5]<br>
+ pmovzxbw m2, m3<br>
+ punpckhbw m3, m6<br>
+ movu m5, [r2 + r4 + 16]<br>
+ pmovzxbw m4, m5<br>
+ punpckhbw m5, m6<br>
+<br>
+ psubw m0, m2<br>
+ psubw m1, m3<br>
+<br>
+ movu [r0 + r1], m0<br>
+ movu [r0 + r1 + 16], m1<br>
+<br>
+ movu m3, [r3 + r5 + 16]<br>
+ pmovzxbw m2, m3<br>
+ punpckhbw m3, m6<br>
+<br>
+ psubw m4, m2<br>
+ psubw m5, m3<br>
+<br>
+ movu [r0 + r1 + 32], m4<br>
+ movu [r0 + r1 + 48], m5<br>
+<br>
+ movu m1, [r2 + r4 + 32]<br>
+ pmovzxbw m0, m1<br>
+ punpckhbw m1, m6<br>
+ movu m3, [r3 + r5 + 32]<br>
+ pmovzxbw m2, m3<br>
+ punpckhbw m3, m6<br>
+<br>
+ psubw m0, m2<br>
+ psubw m1, m3<br>
+<br>
+ movu [r0 + r1 + 64], m0<br>
+ movu [r0 + r1 + 80], m1<br>
+<br>
+ lea r2, [r2 + 2 * r4]<br>
+ lea r3, [r3 + 2 * r5]<br>
+ lea r0, [r0 + 2 * r1]<br>
+<br>
+ dec r6d<br>
+<br>
+jnz .loop<br>
+<br>
+RET<br>
+%endmacro<br>
+<br>
+PIXELSUB_PS_W48_H2 48, 64<br>
+<br>
+;-----------------------------------------------------------------------------<br>
+; void pixel_sub_ps_c_%1x%2(int16_t *dest, intptr_t destride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);<br>
+;-----------------------------------------------------------------------------<br>
+%macro PIXELSUB_PS_W64_H2 2<br>
+INIT_XMM sse4<br>
+cglobal pixel_sub_ps_%1x%2, 6, 7, 7, dest, deststride, src0, src1, srcstride0, srcstride1<br>
+<br>
+add r1, r1<br>
+mov r6d, %2/2<br>
+pxor m6, m6<br>
+<br>
+.loop<br>
+<br>
+ movu m1, [r2]<br>
+ pmovzxbw m0, m1<br>
+ punpckhbw m1, m6<br>
+ movu m3, [r3]<br>
+ pmovzxbw m2, m3<br>
+ punpckhbw m3, m6<br>
+ movu m5, [r2 + 16]<br>
+ pmovzxbw m4, m5<br>
+ punpckhbw m5, m6<br>
+<br>
+ psubw m0, m2<br>
+ psubw m1, m3<br>
+<br>
+ movu [r0], m0<br>
+ movu [r0 + 16], m1<br>
+<br>
+ movu m1, [r3 + 16]<br>
+ pmovzxbw m0, m1<br>
+ punpckhbw m1, m6<br>
+ movu m3, [r2 + 32]<br>
+ pmovzxbw m2, m3<br>
+ punpckhbw m3, m6<br>
+<br>
+ psubw m4, m0<br>
+ psubw m5, m1<br>
+<br>
+ movu [r0 + 32], m4<br>
+ movu [r0 + 48], m5<br>
+<br>
+ movu m5, [r3 + 32]<br>
+ pmovzxbw m4, m5<br>
+ punpckhbw m5, m6<br>
+ movu m1, [r2 + 48]<br>
+ pmovzxbw m0, m1<br>
+ punpckhbw m1, m6<br>
+<br>
+ psubw m2, m4<br>
+ psubw m3, m5<br>
+<br>
+ movu [r0 + 64], m2<br>
+ movu [r0 + 80], m3<br>
+<br>
+ movu m3, [r3 + 48]<br>
+ pmovzxbw m2, m3<br>
+ punpckhbw m3, m6<br>
+ movu m5, [r2 + r4]<br>
+ pmovzxbw m4, m5<br>
+ punpckhbw m5, m6<br>
+<br>
+ psubw m0, m2<br>
+ psubw m1, m3<br>
+<br>
+ movu [r0 + 96], m0<br>
+ movu [r0 + 112], m1<br>
+<br>
+ movu m1, [r3 + r5]<br>
+ pmovzxbw m0, m1<br>
+ punpckhbw m1, m6<br>
+ movu m3, [r2 + r4 + 16]<br>
+ pmovzxbw m2, m3<br>
+ punpckhbw m3, m6<br>
+<br>
+ psubw m4, m0<br>
+ psubw m5, m1<br>
+<br>
+ movu [r0 + r1], m4<br>
+ movu [r0 + r1 + 16], m5<br>
+<br>
+ movu m5, [r3 + r5 + 16]<br>
+ pmovzxbw m4, m5<br>
+ punpckhbw m5, m6<br>
+ movu m1, [r2 + r4 + 32]<br>
+ pmovzxbw m0, m1<br>
+ punpckhbw m1, m6<br>
+<br>
+ psubw m2, m4<br>
+ psubw m3, m5<br>
+<br>
+ movu [r0 + r1 + 32], m2<br>
+ movu [r0 + r1 + 48], m3<br>
+<br>
+ movu m3, [r3 + r5 + 32]<br>
+ pmovzxbw m2, m3<br>
+ punpckhbw m3, m6<br>
+ movu m5, [r2 + r4 + 48]<br>
+ pmovzxbw m4, m5<br>
+ punpckhbw m5, m6<br>
+<br>
+ psubw m0, m2<br>
+ psubw m1, m3<br>
+<br>
+ movu [r0 + r1 + 64], m0<br>
+ movu [r0 + r1 + 80], m1<br>
+<br>
+ movu m1, [r3 + r5 + 48]<br>
+ pmovzxbw m0, m1<br>
+ punpckhbw m1, m6<br>
+<br>
+ psubw m4, m0<br>
+ psubw m5, m1<br>
+<br>
+ movu [r0 + r1 + 96], m4<br>
+ movu [r0 + r1 + 112], m5<br>
+<br>
+ lea r2, [r2 + 2 * r4]<br>
+ lea r3, [r3 + 2 * r5]<br>
+ lea r0, [r0 + 2 * r1]<br>
+<br>
+ dec r6d<br>
+<br>
+jnz .loop<br>
+<br>
+RET<br>
+%endmacro<br>
+<br>
+PIXELSUB_PS_W64_H2 64, 16<br>
+PIXELSUB_PS_W64_H2 64, 32<br>
+PIXELSUB_PS_W64_H2 64, 48<br>
+PIXELSUB_PS_W64_H2 64, 64<br>
_______________________________________________
x265-devel mailing list
x265-devel@videolan.org
https://mailman.videolan.org/listinfo/x265-devel

--
Steve Borho