[x265] [PATCH] asm: 10bpp code for pixel_add_ps_8xN
murugan at multicorewareinc.com
murugan at multicorewareinc.com
Mon Dec 9 08:11:38 CET 2013
# HG changeset patch
# User Murugan Vairavel <murugan at multicorewareinc.com>
# Date 1386573027 -19800
# Mon Dec 09 12:40:27 2013 +0530
# Node ID 60ff94e0b264b33ee57300cfb3ff021ca7e133ee
# Parent 02b888130ed4fa64b5632544bea6a74e3e6f375b
asm: 10bpp code for pixel_add_ps_8xN
diff -r 02b888130ed4 -r 60ff94e0b264 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp Mon Dec 09 12:13:29 2013 +0530
+++ b/source/common/x86/asm-primitives.cpp Mon Dec 09 12:40:27 2013 +0530
@@ -671,6 +671,12 @@
p.chroma[X265_CSP_I420].add_ps[CHROMA_4x8] = x265_pixel_add_ps_4x8_sse2;
p.chroma[X265_CSP_I420].add_ps[CHROMA_4x16] = x265_pixel_add_ps_4x16_sse2;
p.chroma[X265_CSP_I420].add_ps[CHROMA_6x8] = x265_pixel_add_ps_6x8_sse2;
+ p.chroma[X265_CSP_I420].add_ps[CHROMA_8x2] = x265_pixel_add_ps_8x2_sse2;
+ p.chroma[X265_CSP_I420].add_ps[CHROMA_8x4] = x265_pixel_add_ps_8x4_sse2;
+ p.chroma[X265_CSP_I420].add_ps[CHROMA_8x6] = x265_pixel_add_ps_8x6_sse2;
+ p.chroma[X265_CSP_I420].add_ps[CHROMA_8x8] = x265_pixel_add_ps_8x8_sse2;
+ p.chroma[X265_CSP_I420].add_ps[CHROMA_8x16] = x265_pixel_add_ps_8x16_sse2;
+ p.chroma[X265_CSP_I420].add_ps[CHROMA_8x32] = x265_pixel_add_ps_8x32_sse2;
}
if (cpuMask & X265_CPU_SSSE3)
{
diff -r 02b888130ed4 -r 60ff94e0b264 source/common/x86/pixeladd8.asm
--- a/source/common/x86/pixeladd8.asm Mon Dec 09 12:13:29 2013 +0530
+++ b/source/common/x86/pixeladd8.asm Mon Dec 09 12:40:27 2013 +0530
@@ -496,6 +496,27 @@
;-----------------------------------------------------------------------------
; void pixel_add_ps_8x2(pixel *dest, intptr_t destride, pixel *src0, int16_t *src1, intptr_t srcStride0, intptr_t srcStride1)
;-----------------------------------------------------------------------------
+%if HIGH_BIT_DEPTH
+INIT_XMM sse2
+cglobal pixel_add_ps_8x2, 6, 6, 6, dest, destride, src0, scr1, srcStride0, srcStride1
+ add r1, r1
+ add r4, r4
+ add r5, r5
+ pxor m4, m4
+ mova m5, [pw_pixel_max]
+
+ movu m0, [r2]
+ movu m1, [r3]
+ movu m2, [r2 + r4]
+ movu m3, [r3 + r5]
+ paddw m0, m1
+ paddw m2, m3
+ CLIPW m0, m4, m5
+ CLIPW m2, m4, m5
+
+ movu [r0], m0
+ movu [r0 + r1], m2
+%else
INIT_XMM sse4
cglobal pixel_add_ps_8x2, 6, 6, 2, dest, destride, src0, scr1, srcStride0, srcStride1
@@ -516,58 +537,65 @@
packuswb m0, m0
movh [r0 + r1], m0
-
-RET
-
-;-----------------------------------------------------------------------------
-; void pixel_add_ps_8x4(pixel *dest, intptr_t destride, pixel *src0, int16_t *scr1, intptr_t srcStride0, intptr_t srcStride1)
-;-----------------------------------------------------------------------------
-INIT_XMM sse4
-cglobal pixel_add_ps_8x4, 6, 6, 2, dest, destride, src0, scr1, srcStride0, srcStride1
-
-add r5, r5
-
-pmovzxbw m0, [r2]
-movu m1, [r3]
-
-paddw m0, m1
-packuswb m0, m0
-
-movh [r0], m0
-
-pmovzxbw m0, [r2 + r4]
-movu m1, [r3 + r5]
-
-paddw m0, m1
-packuswb m0, m0
-
-movh [r0 + r1], m0
-
-pmovzxbw m0, [r2 + 2 * r4]
-movu m1, [r3 + 2 * r5]
-
-paddw m0, m1
-packuswb m0, m0
-
-movh [r0 + 2 * r1], m0
-
-lea r0, [r0 + 2 * r1]
-lea r2, [r2 + 2 * r4]
-lea r3, [r3 + 2 * r5]
-
-pmovzxbw m0, [r2 + r4]
-movu m1, [r3 + r5]
-
-paddw m0, m1
-packuswb m0, m0
-
-movh [r0 + r1], m0
-
+%endif
RET
;-----------------------------------------------------------------------------
; void pixel_add_ps_8x6(pixel *dest, intptr_t destride, pixel *src0, int16_t *src1, intptr_t srcStride0, intptr_t srcStride1)
;-----------------------------------------------------------------------------
+%if HIGH_BIT_DEPTH
+INIT_XMM sse2
+cglobal pixel_add_ps_8x6, 6, 6, 6, dest, destride, src0, scr1, srcStride0, srcStride1
+ add r1, r1
+ add r4, r4
+ add r5, r5
+ pxor m4, m4
+ mova m5, [pw_pixel_max]
+
+ movu m0, [r2]
+ movu m1, [r3]
+ movu m2, [r2 + r4]
+ movu m3, [r3 + r5]
+ paddw m0, m1
+ paddw m2, m3
+ CLIPW m0, m4, m5
+ CLIPW m2, m4, m5
+
+ movu [r0], m0
+ movu [r0 + r1], m2
+
+ lea r2, [r2 + 2 * r4]
+ lea r3, [r3 + 2 * r5]
+ lea r0, [r0 + 2 * r1]
+
+ movu m0, [r2]
+ movu m1, [r3]
+ movu m2, [r2 + r4]
+ movu m3, [r3 + r5]
+ paddw m0, m1
+ paddw m2, m3
+ CLIPW m0, m4, m5
+ CLIPW m2, m4, m5
+
+ movu [r0], m0
+ movu [r0 + r1], m2
+
+ lea r2, [r2 + 2 * r4]
+ lea r3, [r3 + 2 * r5]
+ lea r0, [r0 + 2 * r1]
+
+ movu m0, [r2]
+ movu m1, [r3]
+ movu m2, [r2 + r4]
+ movu m3, [r3 + r5]
+ paddw m0, m1
+ paddw m2, m3
+ CLIPW m0, m4, m5
+ CLIPW m2, m4, m5
+
+ movu [r0], m0
+ movu [r0 + r1], m2
+%else
INIT_XMM sse4
cglobal pixel_add_ps_8x6, 6, 6, 2, dest, destride, src0, scr1, srcStride0, srcStride1
@@ -628,13 +656,51 @@
packuswb m0, m0
movh [r0 + r1], m0
-
+%endif
RET
;-----------------------------------------------------------------------------
; void pixel_add_ps_%1x%2(pixel *dest, intptr_t destride, pixel *src0, int16_t *src1, intptr_t srcStride0, intptr_t srcStride1)
;-----------------------------------------------------------------------------
%macro PIXEL_ADD_PS_W8_H4 2
+%if HIGH_BIT_DEPTH
+INIT_XMM sse2
+cglobal pixel_add_ps_%1x%2, 6, 7, 6, dest, destride, src0, scr1, srcStride0, srcStride1
+ mov r6d, %2/4
+ add r1, r1
+ add r4, r4
+ add r5, r5
+ pxor m4, m4
+ mova m5, [pw_pixel_max]
+.loop:
+ movu m0, [r2]
+ movu m1, [r3]
+ movu m2, [r2 + r4]
+ movu m3, [r3 + r5]
+ paddw m0, m1
+ paddw m2, m3
+ CLIPW m0, m4, m5
+ CLIPW m2, m4, m5
+
+ movu [r0], m0
+ movu [r0 + r1], m2
+
+ lea r2, [r2 + 2 * r4]
+ lea r3, [r3 + 2 * r5]
+ lea r0, [r0 + 2 * r1]
+
+ movu m0, [r2]
+ movu m1, [r3]
+ movu m2, [r2 + r4]
+ movu m3, [r3 + r5]
+ paddw m0, m1
+ paddw m2, m3
+ CLIPW m0, m4, m5
+ CLIPW m2, m4, m5
+
+ movu [r0], m0
+ movu [r0 + r1], m2
+%else
INIT_XMM sse4
cglobal pixel_add_ps_%1x%2, 6, 7, 2, dest, destride, src0, scr1, srcStride0, srcStride1
@@ -678,7 +744,7 @@
packuswb m0, m0
movh [r0 + r1], m0
-
+%endif
lea r0, [r0 + 2 * r1]
lea r2, [r2 + 2 * r4]
lea r3, [r3 + 2 * r5]
@@ -689,6 +755,7 @@
RET
%endmacro
+PIXEL_ADD_PS_W8_H4 8, 4
PIXEL_ADD_PS_W8_H4 8, 8
PIXEL_ADD_PS_W8_H4 8, 16
PIXEL_ADD_PS_W8_H4 8, 32
More information about the x265-devel
mailing list