[x265] [PATCH] asm: 10bpp code for pixel_sub_32xN
murugan at multicorewareinc.com
Fri Dec 6 08:52:45 CET 2013
# HG changeset patch
# User Murugan Vairavel <murugan at multicorewareinc.com>
# Date 1386316278 -19800
# Fri Dec 06 13:21:18 2013 +0530
# Node ID 6469fd3931593a0c64e12891dcf1162e49f3083a
# Parent 5feafa073b2990c5e8a2945dd923bbfa058cad6f
asm: 10bpp code for pixel_sub_32xN
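The macro patched below implements the block-subtraction primitive whose C signature appears in the diff context ("void pixel_sub_ps_c_%1x%2(...)"). As a reference for what the assembly computes, here is a scalar C sketch; the N parameter is for illustration only (x265 generates one function per block size), pixel is uint16_t in a HIGH_BIT_DEPTH build, and strides are in elements, which is why the assembly doubles them into byte offsets:

    #include <stdint.h>

    typedef uint16_t pixel;   /* HIGH_BIT_DEPTH build; uint8_t otherwise */

    /* Scalar reference: residual = src0 - src1 over a 32xN block. */
    static void pixel_sub_ps_c_32xN(int16_t *dest, intptr_t deststride,
                                    const pixel *src0, const pixel *src1,
                                    intptr_t srcstride0, intptr_t srcstride1,
                                    int N)
    {
        for (int y = 0; y < N; y++)
        {
            for (int x = 0; x < 32; x++)
                dest[x] = (int16_t)(src0[x] - src1[x]);

            dest += deststride;
            src0 += srcstride0;
            src1 += srcstride1;
        }
    }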
diff -r 5feafa073b29 -r 6469fd393159 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp Fri Dec 06 12:57:32 2013 +0530
+++ b/source/common/x86/asm-primitives.cpp Fri Dec 06 13:21:18 2013 +0530
@@ -652,7 +652,12 @@
p.chroma[X265_CSP_I420].sub_ps[LUMA_32x32] = x265_pixel_sub_ps_16x16_sse2;
p.chroma[X265_CSP_I420].sub_ps[LUMA_32x64] = x265_pixel_sub_ps_16x32_sse2;
p.chroma[X265_CSP_I420].sub_ps[LUMA_48x64] = x265_pixel_sub_ps_24x32_sse2;
+ p.chroma[X265_CSP_I420].sub_ps[LUMA_64x16] = x265_pixel_sub_ps_32x8_sse2;
+ p.chroma[X265_CSP_I420].sub_ps[LUMA_64x32] = x265_pixel_sub_ps_32x16_sse2;
+ p.chroma[X265_CSP_I420].sub_ps[LUMA_64x48] = x265_pixel_sub_ps_32x24_sse2;
+ p.chroma[X265_CSP_I420].sub_ps[LUMA_64x64] = x265_pixel_sub_ps_32x32_sse2;
p.luma_sub_ps[LUMA_16x64] = x265_pixel_sub_ps_16x64_sse2;
+ p.luma_sub_ps[LUMA_32x64] = x265_pixel_sub_ps_32x64_sse2;
}
if (cpuMask & X265_CPU_SSSE3)
{
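A note on the table entries above: the chroma sub_ps arrays are indexed by the luma partition size, and under I420 (4:2:0 subsampling) each chroma plane has half the luma width and height. That is why the LUMA_64x16 slot is wired to the 32x8 kernel. A hypothetical helper spelling out the mapping:

    /* Illustration only: I420 chroma dimensions for a luma partition.
     * LUMA_64x16 -> 32x8, LUMA_64x64 -> 32x32, and so on. */
    static void chromaDims420(int lumaW, int lumaH, int *chromaW, int *chromaH)
    {
        *chromaW = lumaW >> 1;   /* half horizontal resolution */
        *chromaH = lumaH >> 1;   /* half vertical resolution */
    }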
diff -r 5feafa073b29 -r 6469fd393159 source/common/x86/pixel-util8.asm
--- a/source/common/x86/pixel-util8.asm Fri Dec 06 12:57:32 2013 +0530
+++ b/source/common/x86/pixel-util8.asm Fri Dec 06 13:21:18 2013 +0530
@@ -3165,36 +3165,70 @@
; void pixel_sub_ps_c_%1x%2(int16_t *dest, intptr_t deststride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
;-----------------------------------------------------------------------------
%macro PIXELSUB_PS_W32_H2 2
-INIT_XMM sse4
-cglobal pixel_sub_ps_%1x%2, 6, 7, 8, dest, deststride, src0, src1, srcstride0, srcstride1
-
-add r1, r1
-mov r6d, %2/2
-
+cglobal pixel_sub_ps_%1x%2, 6, 7, 6, dest, deststride, src0, src1, srcstride0, srcstride1
+ add r1, r1
+ mov r6d, %2/2
+%if HIGH_BIT_DEPTH
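+    ; 16-bit pixels: double the source strides too, so they become byte offsets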
+ add r4, r4
+ add r5, r5
.loop
-
+ movu m0, [r2]
+ movu m1, [r3]
+ movu m2, [r2 + 16]
+ movu m3, [r3 + 16]
+ movu m4, [r2 + 32]
+ movu m5, [r3 + 32]
+ psubw m0, m1
+ psubw m2, m3
+ psubw m4, m5
+ movu m3, [r2 + 48]
+ movu m5, [r3 + 48]
+ psubw m3, m5
+
+ movu [r0], m0
+ movu [r0 + 16], m2
+ movu [r0 + 32], m4
+ movu [r0 + 48], m3
+
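+    ; second row of the pair: same pattern offset by one source stride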
+ movu m0, [r2 + r4]
+ movu m1, [r3 + r5]
+ movu m2, [r2 + r4 + 16]
+ movu m3, [r3 + r5 + 16]
+ movu m4, [r2 + r4 + 32]
+ movu m5, [r3 + r5 + 32]
+ psubw m0, m1
+ psubw m2, m3
+ psubw m4, m5
+ movu m3, [r2 + r4 + 48]
+ movu m5, [r3 + r5 + 48]
+ psubw m3, m5
+
+ movu [r0 + r1], m0
+ movu [r0 + r1 + 16], m2
+ movu [r0 + r1 + 32], m4
+ movu [r0 + r1 + 48], m3
+%else
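+    ; 8bpp: movh loads 8 bytes; pmovzxbw (SSE4.1) widens them to 16 bits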
+.loop
movh m0, [r2]
movh m1, [r2 + 8]
movh m2, [r2 + 16]
- movh m3, [r2 + 24]
- movh m4, [r3]
- movh m5, [r3 + 8]
- movh m6, [r3 + 16]
- movh m7, [r3 + 24]
-
+ movh m3, [r3]
+ movh m4, [r3 + 8]
+ movh m5, [r3 + 16]
pmovzxbw m0, m0
pmovzxbw m1, m1
pmovzxbw m2, m2
pmovzxbw m3, m3
pmovzxbw m4, m4
pmovzxbw m5, m5
- pmovzxbw m6, m6
- pmovzxbw m7, m7
-
- psubw m0, m4
- psubw m1, m5
- psubw m2, m6
- psubw m3, m7
+ psubw m0, m3
+ psubw m1, m4
+ psubw m2, m5
+ movh m3, [r2 + 24]
+ movh m4, [r3 + 24]
+ pmovzxbw m4, m4
+ pmovzxbw m3, m3
+ psubw m3, m4
movu [r0], m0
movu [r0 + 16], m1
@@ -3204,47 +3238,52 @@
movh m0, [r2 + r4]
movh m1, [r2 + r4 + 8]
movh m2, [r2 + r4 + 16]
- movh m3, [r2 + r4 + 24]
- movh m4, [r3 + r5]
- movh m5, [r3 + r5 + 8]
- movh m6, [r3 + r5 + 16]
- movh m7, [r3 + r5 + 24]
-
+ movh m3, [r3 + r5]
+ movh m4, [r3 + r5 + 8]
+ movh m5, [r3 + r5 + 16]
pmovzxbw m0, m0
pmovzxbw m1, m1
pmovzxbw m2, m2
pmovzxbw m3, m3
pmovzxbw m4, m4
pmovzxbw m5, m5
- pmovzxbw m6, m6
- pmovzxbw m7, m7
-
- psubw m0, m4
- psubw m1, m5
- psubw m2, m6
- psubw m3, m7
+ psubw m0, m3
+ psubw m1, m4
+ psubw m2, m5
+ movh m3, [r2 + r4 + 24]
+ movh m4, [r3 + r5 + 24]
+ pmovzxbw m3, m3
+ pmovzxbw m4, m4
+ psubw m3, m4
movu [r0 + r1], m0
movu [r0 + r1 + 16], m1
movu [r0 + r1 + 32], m2
movu [r0 + r1 + 48], m3
-
+%endif
+ dec r6d
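+    ; lea does not modify flags, so the jnz below still tests the dec result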
lea r2, [r2 + 2 * r4]
lea r3, [r3 + 2 * r5]
lea r0, [r0 + 2 * r1]
-
- dec r6d
-
-jnz .loop
-
-RET
+ jnz .loop
+ RET
%endmacro
+%if HIGH_BIT_DEPTH
+INIT_XMM sse2
PIXELSUB_PS_W32_H2 32, 8
PIXELSUB_PS_W32_H2 32, 16
PIXELSUB_PS_W32_H2 32, 24
PIXELSUB_PS_W32_H2 32, 32
PIXELSUB_PS_W32_H2 32, 64
+%else
+INIT_XMM sse4
+PIXELSUB_PS_W32_H2 32, 8
+PIXELSUB_PS_W32_H2 32, 16
+PIXELSUB_PS_W32_H2 32, 24
+PIXELSUB_PS_W32_H2 32, 32
+PIXELSUB_PS_W32_H2 32, 64
+%endif
;-----------------------------------------------------------------------------
; void pixel_sub_ps_c_%1x%2(int16_t *dest, intptr_t deststride, pixel *src0, pixel *src1, intptr_t srcstride0, intptr_t srcstride1);
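For readers who follow intrinsics more easily than assembly, the inner row of the HIGH_BIT_DEPTH loop above corresponds to this SSE2 sketch (not part of the patch; x265 ships hand-written assembly, and the function name here is invented):

    #include <emmintrin.h>   /* SSE2 */
    #include <stdint.h>

    /* One 32-pixel row of the 10bpp path: four unaligned 16-byte loads from
     * each source, packed 16-bit subtracts, four unaligned stores. The
     * assembly unrolls this across two rows per loop iteration. */
    static void sub_row_32_10bpp(int16_t *dest,
                                 const uint16_t *src0, const uint16_t *src1)
    {
        for (int x = 0; x < 32; x += 8)
        {
            __m128i a = _mm_loadu_si128((const __m128i *)(src0 + x));
            __m128i b = _mm_loadu_si128((const __m128i *)(src1 + x));
            _mm_storeu_si128((__m128i *)(dest + x), _mm_sub_epi16(a, b));
        }
    }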