[x265] [PATCH] asm: pixelsub_ps routine for all block sizes
murugan at multicorewareinc.com
Wed Nov 13 06:37:04 CET 2013
# HG changeset patch
# User Murugan Vairavel <murugan at multicorewareinc.com>
# Date 1384320986 -19800
# Wed Nov 13 11:06:26 2013 +0530
# Node ID 2d6dd46dc286b287689444974a57bfcdd21d2ecd
# Parent 90c2763ee0272247dabce845f4fcc3c3e73316fb
asm: pixelsub_ps routine for all block sizes
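
For reviewers, a minimal C sketch of the scalar behaviour these routines are
expected to match (assuming the 8-bit build where pixel is uint8_t; the
function name and the explicit width/height parameters are illustrative, not
the exact C reference primitive):

    #include <stdint.h>

    typedef uint8_t pixel;

    /* dest[x] = src0[x] - src1[x] for a bx-by-by block, row by row.
     * Strides are in elements: deststride counts int16_t entries, the
     * source strides count pixels. */
    static void pixel_sub_ps_ref(int16_t *dest, intptr_t deststride,
                                 const pixel *src0, const pixel *src1,
                                 intptr_t srcstride0, intptr_t srcstride1,
                                 int bx, int by)
    {
        for (int y = 0; y < by; y++)
        {
            for (int x = 0; x < bx; x++)
                dest[x] = (int16_t)(src0[x] - src1[x]);

            dest += deststride;
            src0 += srcstride0;
            src1 += srcstride1;
        }
    }
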
diff -r 90c2763ee027 -r 2d6dd46dc286 source/common/x86/pixel-a.asm
--- a/source/common/x86/pixel-a.asm Tue Nov 12 16:55:09 2013 +0530
+++ b/source/common/x86/pixel-a.asm Wed Nov 13 11:06:26 2013 +0530
@@ -5580,3 +5580,1207 @@
jl .loop
movifnidn eax, r0d
RET
+
+;-----------------------------------------------------------------------------
+; void pixel_sub_ps_c_2x4(int16_t *dest, intptr_t deststride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+%if ARCH_X86_64
+ cglobal pixel_sub_ps_2x4, 6, 8, 0
+
+ %define tmp_r1 r1
+ DECLARE_REG_TMP 6, 7
+%else
+ cglobal pixel_sub_ps_2x4, 6, 7, 0, 0-4
+
+ %define tmp_r1 dword [rsp]
+ DECLARE_REG_TMP 6, 1
+%endif ; ARCH_X86_64
+
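+; the dest stride (r1) is in int16_t units; double it to get a byte stride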
+ add r1, r1
+
+%if ARCH_X86_64 == 0
+ mov tmp_r1, r1
+
+%endif
+
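+; 2-pixel-wide rows are too narrow to be worth SIMD: each pixel is handled
+; with scalar byte loads, a subtract, and a 16-bit store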
+    movzx t0d, byte [r2]
+    movzx t1d, byte [r3]
+
+    sub t0d, t1d
+
+    mov [r0], t0w
+    movzx t0d, byte [r2 + 1]
+    movzx t1d, byte [r3 + 1]
+
+    sub t0d, t1d
+
+    mov [r0 + 2], t0w
+
+    add r0, tmp_r1
+
+    movzx t0d, byte [r2 + r4]
+    movzx t1d, byte [r3 + r5]
+
+    sub t0d, t1d
+
+    mov [r0], t0w
+    movzx t0d, byte [r2 + r4 + 1]
+    movzx t1d, byte [r3 + r5 + 1]
+
+    sub t0d, t1d
+
+    mov [r0 + 2], t0w
+
+    add r0, tmp_r1
+
+    movzx t0d, byte [r2 + r4 * 2]
+    movzx t1d, byte [r3 + r5 * 2]
+
+    sub t0d, t1d
+
+    mov [r0], t0w
+    movzx t0d, byte [r2 + r4 * 2 + 1]
+    movzx t1d, byte [r3 + r5 * 2 + 1]
+
+    sub t0d, t1d
+
+    mov [r0 + 2], t0w
+
+    add r0, tmp_r1
+
+    lea r2, [r2 + r4 * 2]
+    lea r3, [r3 + r5 * 2]
+
+    movzx t0d, byte [r2 + r4]
+    movzx t1d, byte [r3 + r5]
+
+    sub t0d, t1d
+
+    mov [r0], t0w
+    movzx t0d, byte [r2 + r4 + 1]
+    movzx t1d, byte [r3 + r5 + 1]
+
+    sub t0d, t1d
+
+    mov [r0 + 2], t0w
+
+RET
+
+;-----------------------------------------------------------------------------
+; void pixel_sub_ps_c_2x8(int16_t *dest, intptr_t deststride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+%if ARCH_X86_64
+ cglobal pixel_sub_ps_2x8, 6, 8, 0
+
+ %define tmp_r1 r1
+ DECLARE_REG_TMP 6, 7
+%else
+ cglobal pixel_sub_ps_2x8, 6, 7, 0, 0-4
+
+ %define tmp_r1 dword [rsp]
+ DECLARE_REG_TMP 6, 1
+%endif ; ARCH_X86_64
+
+ add r1, r1
+
+%if ARCH_X86_64 == 0
+ mov tmp_r1, r1
+
+%endif
+
+ movzx t0d, byte [r2]
+ movzx t1d, byte [r3]
+
+ sub t0d, t1d
+
+ mov [r0], t0w
+ movzx t0d, byte [r2 + 1]
+ movzx t1d, byte [r3 + 1]
+
+ sub t0d, t1d
+
+ mov [r0 + 2], t0w
+
+ add r0, tmp_r1
+
+ movzx t0d, byte [r2 + r4]
+ movzx t1d, byte [r3 + r5]
+
+ sub t0d, t1d
+
+ mov [r0], t0w
+ movzx t0d, byte [r2 + r4 + 1]
+ movzx t1d, byte [r3 + r5 + 1]
+
+ sub t0d, t1d
+
+ mov [r0 + 2], t0w
+
+ add r0, tmp_r1
+
+ movzx t0d, byte [r2 + r4 * 2]
+ movzx t1d, byte [r3 + r5 * 2]
+
+ sub t0d, t1d
+
+ mov [r0], t0w
+ movzx t0d, byte [r2 + r4 * 2 + 1]
+ movzx t1d, byte [r3 + r5 * 2 + 1]
+
+ sub t0d, t1d
+
+ mov [r0 + 2], t0w
+
+ add r0, tmp_r1
+
+ lea r2, [r2 + r4 * 2]
+ lea r3, [r3 + r5 * 2]
+
+ movzx t0d, byte [r2 + r4]
+ movzx t1d, byte [r3 + r5]
+
+ sub t0d, t1d
+
+ mov [r0], t0w
+ movzx t0d, byte [r2 + r4 + 1]
+ movzx t1d, byte [r3 + r5 + 1]
+
+ sub t0d, t1d
+
+ mov [r0 + 2], t0w
+
+ add r0, tmp_r1
+
+ movzx t0d, byte [r2 + r4 * 2]
+ movzx t1d, byte [r3 + r5 * 2]
+
+ sub t0d, t1d
+
+ mov [r0], t0w
+ movzx t0d, byte [r2 + r4 * 2 + 1]
+ movzx t1d, byte [r3 + r5 * 2 + 1]
+
+ sub t0d, t1d
+
+ mov [r0 + 2], t0w
+
+ add r0, tmp_r1
+
+ lea r2, [r2 + r4 * 2]
+ lea r3, [r3 + r5 * 2]
+
+ movzx t0d, byte [r2 + r4]
+ movzx t1d, byte [r3 + r5]
+
+ sub t0d, t1d
+
+ mov [r0], t0w
+ movzx t0d, byte [r2 + r4 + 1]
+ movzx t1d, byte [r3 + r5 + 1]
+
+ sub t0d, t1d
+
+ mov [r0 + 2], t0w
+
+ add r0, tmp_r1
+
+ movzx t0d, byte [r2 + r4 * 2]
+ movzx t1d, byte [r3 + r5 * 2]
+
+ sub t0d, t1d
+
+ mov [r0], t0w
+ movzx t0d, byte [r2 + r4 * 2 + 1]
+ movzx t1d, byte [r3 + r5 * 2 + 1]
+
+ sub t0d, t1d
+
+ mov [r0 + 2], t0w
+
+ add r0, tmp_r1
+
+ lea r2, [r2 + r4 * 2]
+ lea r3, [r3 + r5 * 2]
+
+ movzx t0d, byte [r2 + r4]
+ movzx t1d, byte [r3 + r5]
+
+ sub t0d, t1d
+
+ mov [r0], t0w
+ movzx t0d, byte [r2 + r4 + 1]
+ movzx t1d, byte [r3 + r5 + 1]
+
+ sub t0d, t1d
+
+ mov [r0 + 2], t0w
+
+RET
+
+;-----------------------------------------------------------------------------
+; void pixel_sub_ps_c_4x2(int16_t *dest, intptr_t deststride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal pixel_sub_ps_4x2, 6, 6, 4, dest, deststride, src0, src1, srcstride0, srcstride1
+
+add r1, r1
+
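+; pack two 4-pixel rows into one register: movd loads 4 bytes per row,
+; punpckldq stacks the rows, pmovzxbw widens all 8 bytes to words, so a
+; single psubw covers both rows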
+movd m0, [r2]
+movd m1, [r3]
+
+movd m2, [r2 + r4]
+movd m3, [r3 + r5]
+
+punpckldq m0, m2
+punpckldq m1, m3
+pmovzxbw m0, m0
+pmovzxbw m1, m1
+
+psubw m0, m1
+
+movlps [r0], m0
+movhps [r0 + r1], m0
+
+RET
+
+;-----------------------------------------------------------------------------
+; void pixel_sub_ps_c_4x4(int16_t *dest, intptr_t deststride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal pixel_sub_ps_4x4, 6, 6, 8, dest, deststride, src0, src1, srcstride0, srcstride1
+
+add r1, r1
+
+movd m0, [r2]
+movd m1, [r3]
+
+movd m2, [r2 + r4]
+movd m3, [r3 + r5]
+
+movd m4, [r2 + 2 * r4]
+movd m5, [r3 + 2 * r5]
+
+lea r2, [r2 + 2 * r4]
+lea r3, [r3 + 2 * r5]
+
+movd m6, [r2 + r4]
+movd m7, [r3 + r5]
+
+punpckldq m0, m2
+punpckldq m1, m3
+punpckldq m4, m6
+punpckldq m5, m7
+
+pmovzxbw m0, m0
+pmovzxbw m1, m1
+pmovzxbw m4, m4
+pmovzxbw m5, m5
+
+psubw m0, m1
+psubw m4, m5
+
+movlps [r0], m0
+movhps [r0 + r1], m0
+movlps [r0 + 2 * r1], m4
+
+lea r0, [r0 + 2 * r1]
+
+movhps [r0 + r1], m4
+
+RET
+
+;-----------------------------------------------------------------------------
+; void pixel_sub_ps_c_%1x%2(int16_t *dest, intptr_t deststride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
+;-----------------------------------------------------------------------------
+%macro PIXELSUB_PS_W4_H4 2
+INIT_XMM sse4
+cglobal pixel_sub_ps_%1x%2, 6, 7, 8, dest, deststride, src0, src1, srcstride0, srcstride1
+
+add r1, r1
+mov r6d, %2/4
+
+.loop:
+
+ movd m0, [r2]
+ movd m1, [r3]
+
+ movd m2, [r2 + r4]
+ movd m3, [r3 + r5]
+
+ movd m4, [r2 + 2 * r4]
+ movd m5, [r3 + 2 * r5]
+
+ lea r2, [r2 + 2 * r4]
+ lea r3, [r3 + 2 * r5]
+
+ movd m6, [r2 + r4]
+ movd m7, [r3 + r5]
+
+ punpckldq m0, m2
+ punpckldq m1, m3
+ punpckldq m4, m6
+ punpckldq m5, m7
+
+ pmovzxbw m0, m0
+ pmovzxbw m1, m1
+ pmovzxbw m4, m4
+ pmovzxbw m5, m5
+
+ psubw m0, m1
+ psubw m4, m5
+
+ movlps [r0], m0
+ movhps [r0 + r1], m0
+ movlps [r0 + 2 * r1], m4
+
+ lea r0, [r0 + 2 * r1]
+
+ movhps [r0 + r1], m4
+
+ lea r2, [r2 + 2 * r4]
+ lea r3, [r3 + 2 * r5]
+ lea r0, [r0 + 2 * r1]
+
+ dec r6d
+
+jnz .loop
+
+RET
+%endmacro
+
+PIXELSUB_PS_W4_H4 4, 8
+PIXELSUB_PS_W4_H4 4, 16
+
+;-----------------------------------------------------------------------------
+; void pixel_sub_ps_c_%1x%2(int16_t *dest, intptr_t deststride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
+;-----------------------------------------------------------------------------
+%macro PIXELSUB_PS_W6_H4 2
+INIT_XMM sse4
+cglobal pixel_sub_ps_%1x%2, 6, 7, 8, dest, deststride, src0, src1, srcstride0, srcstride1
+
+add r1, r1
+mov r6d, %2/4
+
+.loop:
+
+ movh m0, [r2]
+ movh m1, [r3]
+
+ movh m2, [r2 + r4]
+ movh m3, [r3 + r5]
+
+ movh m4, [r2 + 2 * r4]
+ movh m5, [r3 + 2 * r5]
+
+ lea r2, [r2 + 2 * r4]
+ lea r3, [r3 + 2 * r5]
+
+ movh m6, [r2 + r4]
+ movh m7, [r3 + r5]
+
+ pmovzxbw m0, m0
+ pmovzxbw m1, m1
+ pmovzxbw m2, m2
+ pmovzxbw m3, m3
+ pmovzxbw m4, m4
+ pmovzxbw m5, m5
+ pmovzxbw m6, m6
+ pmovzxbw m7, m7
+
+ psubw m0, m1
+ psubw m2, m3
+ psubw m4, m5
+ psubw m6, m7
+
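+; each 6-pixel output row is 12 bytes: movh stores the first 8 bytes,
+; pextrd stores the remaining 4 (dword 2 of the widened result)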
+ movh [r0], m0
+ pextrd [r0 + 8], m0, 2
+ movh [r0 + r1], m2
+ pextrd [r0 + r1 + 8], m2, 2
+ movh [r0 + 2* r1], m4
+ pextrd [r0 + 2 * r1 + 8], m4, 2
+
+ lea r0, [r0 + 2 * r1]
+
+ movh [r0 + r1], m6
+ pextrd [r0 + r1 + 8], m6, 2
+
+ lea r2, [r2 + 2 * r4]
+ lea r3, [r3 + 2 * r5]
+ lea r0, [r0 + 2 * r1]
+
+ dec r6d
+
+jnz .loop
+
+RET
+%endmacro
+
+PIXELSUB_PS_W6_H4 6, 8
+
+;-----------------------------------------------------------------------------
+; void pixel_sub_ps_c_8x2(int16_t *dest, intptr_t deststride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal pixel_sub_ps_8x2, 6, 6, 4, dest, deststride, src0, src1, srcstride0, srcstride1
+
+add r1, r1
+
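+; an 8-pixel row fills one register after widening: movh loads 8 bytes,
+; pmovzxbw widens them to 8 words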
+movh m0, [r2]
+movh m1, [r3]
+pmovzxbw m0, m0
+pmovzxbw m1, m1
+
+movh m2, [r2 + r4]
+movh m3, [r3 + r5]
+pmovzxbw m2, m2
+pmovzxbw m3, m3
+
+psubw m0, m1
+psubw m2, m3
+
+movu [r0], m0
+movu [r0 + r1], m2
+
+RET
+
+;-----------------------------------------------------------------------------
+; void pixel_sub_ps_c_8x4(int16_t *dest, intptr_t deststride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal pixel_sub_ps_8x4, 6, 6, 8, dest, deststride, src0, src1, srcstride0, srcstride1
+
+add r1, r1
+
+movh m0, [r2]
+movh m1, [r3]
+pmovzxbw m0, m0
+pmovzxbw m1, m1
+
+movh m2, [r2 + r4]
+movh m3, [r3 + r5]
+pmovzxbw m2, m2
+pmovzxbw m3, m3
+
+movh m4, [r2 + 2 * r4]
+movh m5, [r3 + 2 * r5]
+pmovzxbw m4, m4
+pmovzxbw m5, m5
+
+psubw m0, m1
+psubw m2, m3
+psubw m4, m5
+
+lea r2, [r2 + 2 * r4]
+lea r3, [r3 + 2 * r5]
+
+movh m6, [r2 + r4]
+movh m7, [r3 + r5]
+pmovzxbw m6, m6
+pmovzxbw m7, m7
+
+psubw m6, m7
+
+movu [r0], m0
+movu [r0 + r1], m2
+movu [r0 + 2 * r1], m4
+
+lea r0, [r0 + 2 * r1]
+
+movu [r0 + r1], m6
+
+RET
+
+;-----------------------------------------------------------------------------
+; void pixel_sub_ps_c_8x6(int16_t *dest, intptr_t deststride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
+;-----------------------------------------------------------------------------
+INIT_XMM sse4
+cglobal pixel_sub_ps_8x6, 6, 6, 8, dest, deststride, src0, src1, srcstride0, srcstride1
+
+add r1, r1
+
+movh m0, [r2]
+movh m1, [r3]
+pmovzxbw m0, m0
+pmovzxbw m1, m1
+
+movh m2, [r2 + r4]
+movh m3, [r3 + r5]
+pmovzxbw m2, m2
+pmovzxbw m3, m3
+
+movh m4, [r2 + 2 * r4]
+movh m5, [r3 + 2 * r5]
+pmovzxbw m4, m4
+pmovzxbw m5, m5
+
+psubw m0, m1
+psubw m2, m3
+psubw m4, m5
+
+lea r2, [r2 + 2 * r4]
+lea r3, [r3 + 2 * r5]
+
+movh m6, [r2 + r4]
+movh m7, [r3 + r5]
+pmovzxbw m6, m6
+pmovzxbw m7, m7
+
+movh m1, [r2 + 2 * r4]
+movh m3, [r3 + 2 * r5]
+pmovzxbw m1, m1
+pmovzxbw m3, m3
+
+psubw m6, m7
+psubw m1, m3
+
+lea r2, [r2 + 2 * r4]
+lea r3, [r3 + 2 * r5]
+
+movh m3, [r2 + r4]
+movh m5, [r3 + r5]
+pmovzxbw m3, m3
+pmovzxbw m5, m5
+
+psubw m3, m5
+
+movu [r0], m0
+movu [r0 + r1], m2
+movu [r0 + 2 * r1], m4
+
+lea r0, [r0 + 2 * r1]
+
+movu [r0 + r1], m6
+movu [r0 + 2 * r1], m1
+
+lea r0, [r0 + 2 * r1]
+
+movu [r0 + r1], m3
+
+RET
+
+;-----------------------------------------------------------------------------
+; void pixel_sub_ps_c_%1x%2(int16_t *dest, intptr_t deststride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
+;-----------------------------------------------------------------------------
+%macro PIXELSUB_PS_W8_H4 2
+INIT_XMM sse4
+cglobal pixel_sub_ps_%1x%2, 6, 7, 8, dest, deststride, src0, src1, srcstride0, srcstride1
+
+add r1, r1
+mov r6d, %2/4
+
+.loop:
+
+ movh m0, [r2]
+ movh m1, [r3]
+ pmovzxbw m0, m0
+ pmovzxbw m1, m1
+
+ movh m2, [r2 + r4]
+ movh m3, [r3 + r5]
+ pmovzxbw m2, m2
+ pmovzxbw m3, m3
+
+ movh m4, [r2 + 2 * r4]
+ movh m5, [r3 + 2 * r5]
+ pmovzxbw m4, m4
+ pmovzxbw m5, m5
+
+ psubw m0, m1
+ psubw m2, m3
+ psubw m4, m5
+
+ lea r2, [r2 + 2 * r4]
+ lea r3, [r3 + 2 * r5]
+
+ movh m6, [r2 + r4]
+ movh m7, [r3 + r5]
+ pmovzxbw m6, m6
+ pmovzxbw m7, m7
+
+ psubw m6, m7
+
+ movu [r0], m0
+ movu [r0 + r1], m2
+ movu [r0 + 2 * r1], m4
+
+ lea r0, [r0 + 2 * r1]
+
+ movu [r0 + r1], m6
+
+ lea r2, [r2 + 2 * r4]
+ lea r3, [r3 + 2 * r5]
+ lea r0, [r0 + 2 * r1]
+
+ dec r6d
+
+jnz .loop
+
+RET
+%endmacro
+
+PIXELSUB_PS_W8_H4 8, 8
+PIXELSUB_PS_W8_H4 8, 16
+PIXELSUB_PS_W8_H4 8, 32
+
+;-----------------------------------------------------------------------------
+; void pixel_sub_ps_c_%1x%2(int16_t *dest, intptr_t deststride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
+;-----------------------------------------------------------------------------
+%macro PIXELSUB_PS_W12_H4 2
+INIT_XMM sse4
+cglobal pixel_sub_ps_%1x%2, 6, 7, 6, dest, deststride, src0, src1, srcstride0, srcstride1
+
+add r1, r1
+mov r6d, %2/4
+
+.loop:
+
+ movu m0, [r2]
+ movu m1, [r3]
+ movu m2, [r2 + r4]
+ movu m3, [r3 + r5]
+
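+; punpckhdq gathers pixels 8-11 of both rows into m4/m5 so a single
+; widen/subtract handles the 4-pixel tails of two rows at once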
+ mova m4, m0
+ mova m5, m1
+ punpckhdq m4, m2
+ punpckhdq m5, m3
+
+ pmovzxbw m0, m0
+ pmovzxbw m1, m1
+ pmovzxbw m2, m2
+ pmovzxbw m3, m3
+ pmovzxbw m4, m4
+ pmovzxbw m5, m5
+
+ psubw m0, m1
+ psubw m2, m3
+ psubw m4, m5
+
+ movu [r0], m0
+ movlps [r0 + 16], m4
+ movu [r0 + r1], m2
+ movhps [r0 + r1 + 16], m4
+
+ movu m0, [r2 + 2 * r4]
+ movu m1, [r3 + 2 * r5]
+
+ lea r2, [r2 + 2 * r4]
+ lea r3, [r3 + 2 * r5]
+
+ movu m2, [r2 + r4]
+ movu m3, [r3 + r5]
+
+ mova m4, m0
+ mova m5, m1
+ punpckhdq m4, m2
+ punpckhdq m5, m3
+
+ pmovzxbw m0, m0
+ pmovzxbw m1, m1
+ pmovzxbw m2, m2
+ pmovzxbw m3, m3
+ pmovzxbw m4, m4
+ pmovzxbw m5, m5
+
+ psubw m0, m1
+ psubw m2, m3
+ psubw m4, m5
+
+ movu [r0 + 2 * r1], m0
+ movlps [r0 + 2 * r1 + 16], m4
+
+ lea r0, [r0 + 2 * r1]
+
+ movu [r0 + r1], m2
+ movhps [r0 + r1 + 16], m4
+
+ lea r2, [r2 + 2 * r4]
+ lea r3, [r3 + 2 * r5]
+ lea r0, [r0 + 2 * r1]
+
+ dec r6d
+
+jnz .loop
+
+RET
+%endmacro
+
+PIXELSUB_PS_W12_H4 12, 16
+
+;-----------------------------------------------------------------------------
+; void pixel_sub_ps_c_%1x%2(int16_t *dest, intptr_t deststride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
+;-----------------------------------------------------------------------------
+%macro PIXELSUB_PS_W16_H4 2
+INIT_XMM sse4
+cglobal pixel_sub_ps_%1x%2, 6, 7, 7, dest, deststride, src0, src1, srcstride0, srcstride1
+
+add r1, r1
+mov r6d, %2/4
+pxor m6, m6
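+; m6 stays zero for the whole loop: pmovzxbw widens the low 8 bytes of
+; each 16-byte load, punpckhbw against m6 zero-extends the high 8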
+
+.loop:
+
+ movu m1, [r2]
+ pmovzxbw m0, m1
+ punpckhbw m1, m6
+ movu m3, [r3]
+ pmovzxbw m2, m3
+ punpckhbw m3, m6
+
+ psubw m0, m2
+ psubw m1, m3
+
+ movu m5, [r2 + r4]
+ pmovzxbw m4, m5
+ punpckhbw m5, m6
+ movu m3, [r3 + r5]
+ pmovzxbw m2, m3
+ punpckhbw m3, m6
+
+ psubw m4, m2
+ psubw m5, m3
+
+ movu [r0], m0
+ movu [r0 + 16], m1
+ movu [r0 + r1], m4
+ movu [r0 + r1 + 16], m5
+
+ movu m1, [r2 + 2 * r4]
+ pmovzxbw m0, m1
+ punpckhbw m1, m6
+ movu m3, [r3 + 2 * r5]
+ pmovzxbw m2, m3
+ punpckhbw m3, m6
+
+ lea r2, [r2 + 2 * r4]
+ lea r3, [r3 + 2 * r5]
+
+ psubw m0, m2
+ psubw m1, m3
+
+ movu m5, [r2 + r4]
+ pmovzxbw m4, m5
+ punpckhbw m5, m6
+ movu m3, [r3 + r5]
+ pmovzxbw m2, m3
+ punpckhbw m3, m6
+
+ psubw m4, m2
+ psubw m5, m3
+
+ movu [r0 + 2 * r1], m0
+ movu [r0 + 2 * r1 + 16], m1
+
+ lea r0, [r0 + 2 * r1]
+
+ movu [r0 + r1], m4
+ movu [r0 + r1 + 16], m5
+
+ lea r2, [r2 + 2 * r4]
+ lea r3, [r3 + 2 * r5]
+ lea r0, [r0 + 2 * r1]
+
+ dec r6d
+
+jnz .loop
+
+RET
+%endmacro
+
+PIXELSUB_PS_W16_H4 16, 4
+PIXELSUB_PS_W16_H4 16, 8
+PIXELSUB_PS_W16_H4 16, 12
+PIXELSUB_PS_W16_H4 16, 16
+PIXELSUB_PS_W16_H4 16, 32
+PIXELSUB_PS_W16_H4 16, 64
+
+;-----------------------------------------------------------------------------
+; void pixel_sub_ps_c_%1x%2(int16_t *dest, intptr_t deststride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
+;-----------------------------------------------------------------------------
+%macro PIXELSUB_PS_W24_H2 2
+INIT_XMM sse4
+cglobal pixel_sub_ps_%1x%2, 6, 7, 7, dest, deststride, src0, src1, srcstride0, srcstride1
+
+add r1, r1
+mov r6d, %2/2
+pxor m6, m6
+
+.loop:
+
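+; a 24-pixel row is split into a 16-byte movu (widened in two halves via
+; m6) plus an 8-byte movh widened with pmovzxbw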
+ movu m1, [r2]
+ pmovzxbw m0, m1
+ punpckhbw m1, m6
+ movh m2, [r2 + 16]
+ pmovzxbw m2, m2
+ movu m4, [r3]
+ pmovzxbw m3, m4
+ punpckhbw m4, m6
+ movh m5, [r3 + 16]
+ pmovzxbw m5, m5
+
+ psubw m0, m3
+ psubw m1, m4
+ psubw m2, m5
+
+ movu [r0], m0
+ movu [r0 + 16], m1
+ movu [r0 + 32], m2
+
+ movu m1, [r2 + r4]
+ pmovzxbw m0, m1
+ punpckhbw m1, m6
+ movh m2, [r2 + r4 + 16]
+ pmovzxbw m2, m2
+ movu m4, [r3 + r5]
+ pmovzxbw m3, m4
+ punpckhbw m4, m6
+ movh m5, [r3 + r5 + 16]
+ pmovzxbw m5, m5
+
+ psubw m0, m3
+ psubw m1, m4
+ psubw m2, m5
+
+ movu [r0 + r1], m0
+ movu [r0 + r1 + 16], m1
+ movu [r0 + r1 + 32], m2
+
+ lea r2, [r2 + 2 * r4]
+ lea r3, [r3 + 2 * r5]
+ lea r0, [r0 + 2 * r1]
+
+ dec r6d
+
+jnz .loop
+
+RET
+%endmacro
+
+PIXELSUB_PS_W24_H2 24, 32
+
+;-----------------------------------------------------------------------------
+; void pixel_sub_ps_c_%1x%2(int16_t *dest, intptr_t deststride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
+;-----------------------------------------------------------------------------
+%macro PIXELSUB_PS_W32_H2 2
+INIT_XMM sse4
+cglobal pixel_sub_ps_%1x%2, 6, 7, 8, dest, deststride, src0, src1, srcstride0, srcstride1
+
+add r1, r1
+mov r6d, %2/2
+
+.loop:
+
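+; a 32-pixel row is processed as four 8-byte movh loads, each widened
+; with pmovzxbw, so no zero register is needed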
+ movh m0, [r2]
+ movh m1, [r2 + 8]
+ movh m2, [r2 + 16]
+ movh m3, [r2 + 24]
+ movh m4, [r3]
+ movh m5, [r3 + 8]
+ movh m6, [r3 + 16]
+ movh m7, [r3 + 24]
+
+ pmovzxbw m0, m0
+ pmovzxbw m1, m1
+ pmovzxbw m2, m2
+ pmovzxbw m3, m3
+ pmovzxbw m4, m4
+ pmovzxbw m5, m5
+ pmovzxbw m6, m6
+ pmovzxbw m7, m7
+
+ psubw m0, m4
+ psubw m1, m5
+ psubw m2, m6
+ psubw m3, m7
+
+ movu [r0], m0
+ movu [r0 + 16], m1
+ movu [r0 + 32], m2
+ movu [r0 + 48], m3
+
+ movh m0, [r2 + r4]
+ movh m1, [r2 + r4 + 8]
+ movh m2, [r2 + r4 + 16]
+ movh m3, [r2 + r4 + 24]
+ movh m4, [r3 + r5]
+ movh m5, [r3 + r5 + 8]
+ movh m6, [r3 + r5 + 16]
+ movh m7, [r3 + r5 + 24]
+
+ pmovzxbw m0, m0
+ pmovzxbw m1, m1
+ pmovzxbw m2, m2
+ pmovzxbw m3, m3
+ pmovzxbw m4, m4
+ pmovzxbw m5, m5
+ pmovzxbw m6, m6
+ pmovzxbw m7, m7
+
+ psubw m0, m4
+ psubw m1, m5
+ psubw m2, m6
+ psubw m3, m7
+
+ movu [r0 + r1], m0
+ movu [r0 + r1 + 16], m1
+ movu [r0 + r1 + 32], m2
+ movu [r0 + r1 + 48], m3
+
+ lea r2, [r2 + 2 * r4]
+ lea r3, [r3 + 2 * r5]
+ lea r0, [r0 + 2 * r1]
+
+ dec r6d
+
+jnz .loop
+
+RET
+%endmacro
+
+PIXELSUB_PS_W32_H2 32, 8
+PIXELSUB_PS_W32_H2 32, 16
+PIXELSUB_PS_W32_H2 32, 24
+PIXELSUB_PS_W32_H2 32, 32
+PIXELSUB_PS_W32_H2 32, 64
+
+;-----------------------------------------------------------------------------
+; void pixel_sub_ps_c_%1x%2(int16_t *dest, intptr_t deststride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
+;-----------------------------------------------------------------------------
+%macro PIXELSUB_PS_W48_H2 2
+INIT_XMM sse4
+cglobal pixel_sub_ps_%1x%2, 6, 7, 7, dest, deststride, src0, src1, srcstride0, srcstride1
+
+add r1, r1
+mov r6d, %2/2
+pxor m6, m6
+
+.loop:
+
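+; a 48-pixel row is three 16-byte chunks, each widened in two halves
+; (pmovzxbw for the low 8 bytes, punpckhbw with m6 for the high 8)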
+ movu m1, [r2]
+ pmovzxbw m0, m1
+ punpckhbw m1, m6
+ movu m3, [r3]
+ pmovzxbw m2, m3
+ punpckhbw m3, m6
+ movu m5, [r2 + 16]
+ pmovzxbw m4, m5
+ punpckhbw m5, m6
+
+ psubw m0, m2
+ psubw m1, m3
+
+ movu [r0], m0
+ movu [r0 + 16], m1
+
+ movu m3, [r3 + 16]
+ pmovzxbw m2, m3
+ punpckhbw m3, m6
+
+ psubw m4, m2
+ psubw m5, m3
+
+ movu [r0 + 32], m4
+ movu [r0 + 48], m5
+
+ movu m1, [r2 + 32]
+ pmovzxbw m0, m1
+ punpckhbw m1, m6
+ movu m3, [r3 + 32]
+ pmovzxbw m2, m3
+ punpckhbw m3, m6
+
+ psubw m0, m2
+ psubw m1, m3
+
+ movu [r0 + 64], m0
+ movu [r0 + 80], m1
+
+ movu m1, [r2 + r4]
+ pmovzxbw m0, m1
+ punpckhbw m1, m6
+ movu m3, [r3 + r5]
+ pmovzxbw m2, m3
+ punpckhbw m3, m6
+    movu m5, [r2 + r4 + 16]
+ pmovzxbw m4, m5
+ punpckhbw m5, m6
+
+ psubw m0, m2
+ psubw m1, m3
+
+ movu [r0 + r1], m0
+ movu [r0 + r1 + 16], m1
+
+    movu m3, [r3 + r5 + 16]
+ pmovzxbw m2, m3
+ punpckhbw m3, m6
+
+ psubw m4, m2
+ psubw m5, m3
+
+ movu [r0 + r1 + 32], m4
+ movu [r0 + r1 + 48], m5
+
+ movu m1, [r2 + r4 + 32]
+ pmovzxbw m0, m1
+ punpckhbw m1, m6
+ movu m3, [r3 + r5 + 32]
+ pmovzxbw m2, m3
+ punpckhbw m3, m6
+
+ psubw m0, m2
+ psubw m1, m3
+
+ movu [r0 + r1 + 64], m0
+ movu [r0 + r1 + 80], m1
+
+ lea r2, [r2 + 2 * r4]
+ lea r3, [r3 + 2 * r5]
+ lea r0, [r0 + 2 * r1]
+
+ dec r6d
+
+jnz .loop
+
+RET
+%endmacro
+
+PIXELSUB_PS_W48_H2 48, 64
+
+;-----------------------------------------------------------------------------
+; void pixel_sub_ps_c_%1x%2(int16_t *dest, intptr_t deststride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
+;-----------------------------------------------------------------------------
+%macro PIXELSUB_PS_W64_H2 2
+INIT_XMM sse4
+cglobal pixel_sub_ps_%1x%2, 6, 7, 7, dest, deststride, src0, src1, srcstride0, srcstride1
+
+add r1, r1
+mov r6d, %2/2
+pxor m6, m6
+
+.loop:
+
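+; a 64-pixel row is four 16-byte chunks; loads for the next chunk are
+; issued before the subtract/store of the previous one to hide latency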
+ movu m1, [r2]
+ pmovzxbw m0, m1
+ punpckhbw m1, m6
+ movu m3, [r3]
+ pmovzxbw m2, m3
+ punpckhbw m3, m6
+ movu m5, [r2 + 16]
+ pmovzxbw m4, m5
+ punpckhbw m5, m6
+
+ psubw m0, m2
+ psubw m1, m3
+
+ movu [r0], m0
+ movu [r0 + 16], m1
+
+ movu m1, [r3 + 16]
+ pmovzxbw m0, m1
+ punpckhbw m1, m6
+ movu m3, [r2 + 32]
+ pmovzxbw m2, m3
+ punpckhbw m3, m6
+
+ psubw m4, m0
+ psubw m5, m1
+
+ movu [r0 + 32], m4
+ movu [r0 + 48], m5
+
+ movu m5, [r3 + 32]
+ pmovzxbw m4, m5
+ punpckhbw m5, m6
+ movu m1, [r2 + 48]
+ pmovzxbw m0, m1
+ punpckhbw m1, m6
+
+ psubw m2, m4
+ psubw m3, m5
+
+ movu [r0 + 64], m2
+ movu [r0 + 80], m3
+
+ movu m3, [r3 + 48]
+ pmovzxbw m2, m3
+ punpckhbw m3, m6
+ movu m5, [r2 + r4]
+ pmovzxbw m4, m5
+ punpckhbw m5, m6
+
+ psubw m0, m2
+ psubw m1, m3
+
+ movu [r0 + 96], m0
+ movu [r0 + 112], m1
+
+ movu m1, [r3 + r5]
+ pmovzxbw m0, m1
+ punpckhbw m1, m6
+ movu m3, [r2 + r4 + 16]
+ pmovzxbw m2, m3
+ punpckhbw m3, m6
+
+ psubw m4, m0
+ psubw m5, m1
+
+ movu [r0 + r1], m4
+ movu [r0 + r1 + 16], m5
+
+ movu m5, [r3 + r5 + 16]
+ pmovzxbw m4, m5
+ punpckhbw m5, m6
+ movu m1, [r2 + r4 + 32]
+ pmovzxbw m0, m1
+ punpckhbw m1, m6
+
+ psubw m2, m4
+ psubw m3, m5
+
+ movu [r0 + r1 + 32], m2
+ movu [r0 + r1 + 48], m3
+
+ movu m3, [r3 + r5 + 32]
+ pmovzxbw m2, m3
+ punpckhbw m3, m6
+ movu m5, [r2 + r4 + 48]
+ pmovzxbw m4, m5
+ punpckhbw m5, m6
+
+ psubw m0, m2
+ psubw m1, m3
+
+ movu [r0 + r1 + 64], m0
+ movu [r0 + r1 + 80], m1
+
+ movu m1, [r3 + r5 + 48]
+ pmovzxbw m0, m1
+ punpckhbw m1, m6
+
+ psubw m4, m0
+ psubw m5, m1
+
+ movu [r0 + r1 + 96], m4
+ movu [r0 + r1 + 112], m5
+
+ lea r2, [r2 + 2 * r4]
+ lea r3, [r3 + 2 * r5]
+ lea r0, [r0 + 2 * r1]
+
+ dec r6d
+
+jnz .loop
+
+RET
+%endmacro
+
+PIXELSUB_PS_W64_H2 64, 16
+PIXELSUB_PS_W64_H2 64, 32
+PIXELSUB_PS_W64_H2 64, 48
+PIXELSUB_PS_W64_H2 64, 64