[x265] [PATCH Review only] asm-sse2: 16bpp code for filter_p2s[4x4](2.47x), filter_p2s[4x8](2.78x),
rajesh at multicorewareinc.com
rajesh at multicorewareinc.com
Mon Mar 2 15:25:29 CET 2015
# HG changeset patch
# User Rajesh Paulraj <rajesh at multicorewareinc.com>
# Date 1425306286 -19800
# Mon Mar 02 19:54:46 2015 +0530
# Node ID b2897cf788eca68d2b5ef919d2de8c91fa759484
# Parent 70be3fa2ee550ec1b954c420e3c7a915589163a7
asm-sse2: 16bpp code for filter_p2s[4x4](2.47x), filter_p2s[4x8](2.78x),
filter_p2s[4x16](2.95x), filter_p2s[8x4](4.07x), filter_p2s[8x8](4.54x),
filter_p2s[8x16](4.40x), filter_p2s[8x32](4.67x), filter_p2s[16x4](6.19x),
filter_p2s[16x8](7.51x), filter_p2s[16x12](7.35x), filter_p2s[16x16](7.30x),
filter_p2s[16x32](7.29x), filter_p2s[16x64](7.22x), filter_p2s[32x8](6.26x),
filter_p2s[32x16](6.92x), filter_p2s[32x24](6.18x), filter_p2s[32x32](6.91x),
filter_p2s[32x64](6.69x), filter_p2s[64x16](6.13x), filter_p2s[64x32](8.10x),
filter_p2s[64x48](5.89x), filter_p2s[64x64](8.03x)
diff -r 70be3fa2ee55 -r b2897cf788ec source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp Mon Mar 02 10:52:59 2015 +0530
+++ b/source/common/x86/asm-primitives.cpp Mon Mar 02 19:54:46 2015 +0530
@@ -855,7 +855,29 @@
PIXEL_AVG_W4(mmx2);
LUMA_VAR(sse2);
- p.luma_p2s = x265_luma_p2s_sse2;
+ p.pu[LUMA_4x4].filter_p2s = x265_pixelToShort_4x4_sse2;
+ p.pu[LUMA_4x8].filter_p2s = x265_pixelToShort_4x8_sse2;
+ p.pu[LUMA_4x16].filter_p2s = x265_pixelToShort_4x16_sse2;
+ p.pu[LUMA_8x4].filter_p2s = x265_pixelToShort_8x4_sse2;
+ p.pu[LUMA_8x8].filter_p2s = x265_pixelToShort_8x8_sse2;
+ p.pu[LUMA_8x16].filter_p2s = x265_pixelToShort_8x16_sse2;
+ p.pu[LUMA_8x32].filter_p2s = x265_pixelToShort_8x32_sse2;
+ p.pu[LUMA_16x4].filter_p2s = x265_pixelToShort_16x4_sse2;
+ p.pu[LUMA_16x8].filter_p2s = x265_pixelToShort_16x8_sse2;
+ p.pu[LUMA_16x12].filter_p2s = x265_pixelToShort_16x12_sse2;
+ p.pu[LUMA_16x16].filter_p2s = x265_pixelToShort_16x16_sse2;
+ p.pu[LUMA_16x32].filter_p2s = x265_pixelToShort_16x32_sse2;
+ p.pu[LUMA_16x64].filter_p2s = x265_pixelToShort_16x64_sse2;
+ p.pu[LUMA_32x8].filter_p2s = x265_pixelToShort_32x8_sse2;
+ p.pu[LUMA_32x16].filter_p2s = x265_pixelToShort_32x16_sse2;
+ p.pu[LUMA_32x24].filter_p2s = x265_pixelToShort_32x24_sse2;
+ p.pu[LUMA_32x32].filter_p2s = x265_pixelToShort_32x32_sse2;
+ p.pu[LUMA_32x64].filter_p2s = x265_pixelToShort_32x64_sse2;
+ p.pu[LUMA_64x16].filter_p2s = x265_pixelToShort_64x16_sse2;
+ p.pu[LUMA_64x32].filter_p2s = x265_pixelToShort_64x32_sse2;
+ p.pu[LUMA_64x48].filter_p2s = x265_pixelToShort_64x48_sse2;
+ p.pu[LUMA_64x64].filter_p2s = x265_pixelToShort_64x64_sse2;
+
p.chroma[X265_CSP_I420].p2s = x265_chroma_p2s_sse2;
p.chroma[X265_CSP_I422].p2s = x265_chroma_p2s_sse2;
diff -r 70be3fa2ee55 -r b2897cf788ec source/common/x86/ipfilter16.asm
--- a/source/common/x86/ipfilter16.asm Mon Mar 02 10:52:59 2015 +0530
+++ b/source/common/x86/ipfilter16.asm Mon Mar 02 19:54:46 2015 +0530
@@ -5525,17 +5525,17 @@
FILTER_VER_LUMA_SS 64, 16
FILTER_VER_LUMA_SS 16, 64
-;--------------------------------------------------------------------------------------------------
-; void filterConvertPelToShort(pixel *src, intptr_t srcStride, int16_t *dst, int width, int height)
-;--------------------------------------------------------------------------------------------------
+;-----------------------------------------------------------------------------
+; void pixelToShort(pixel *src, intptr_t srcStride, int16_t *dst)
+;-----------------------------------------------------------------------------
+%macro PIXEL_WH_4xN 2
INIT_XMM sse2
-cglobal luma_p2s, 3, 7, 5
-
+cglobal pixelToShort_%1x%2, 3, 7, 5
add r1, r1
; load width and height
- mov r3d, r3m
- mov r4d, r4m
+ mov r3d, %1
+ mov r4d, %2
; load constant
mova m4, [tab_c_n8192]
@@ -5585,5 +5585,267 @@
sub r4d, 4
jnz .loopH
-
RET
+%endmacro
+PIXEL_WH_4xN 4, 4
+PIXEL_WH_4xN 4, 8
+PIXEL_WH_4xN 4, 16
+
+;-----------------------------------------------------------------------------
+; void pixelToShort(pixel *src, intptr_t srcStride, int16_t *dst)
+;-----------------------------------------------------------------------------
+%macro PIXEL_WH_8xN 2
+INIT_XMM sse2
+cglobal pixelToShort_%1x%2, 3, 7, 5
+
+ add r1, r1
+
+ ; width (%1) and height (%2) are assemble-time macro constants (not r3m/r4m); r3d/r4d could be immediates
+ mov r3d, %1
+ mov r4d, %2
+
+ ; load constant
+ mova m4, [tab_c_n8192]
+
+.loopH:
+
+ xor r5d, r5d
+.loopW:
+ lea r6, [r0 + r5 * 2]
+
+ movu m0, [r6]
+ psllw m0, 4
+ paddw m0, m4
+
+ movu m1, [r6 + r1]
+ psllw m1, 4
+ paddw m1, m4
+
+ movu m2, [r6 + r1 * 2]
+ psllw m2, 4
+ paddw m2, m4
+
+ lea r6, [r6 + r1 * 2]
+ movu m3, [r6 + r1]
+ psllw m3, 4
+ paddw m3, m4
+
+ add r5, 8
+ cmp r5, r3
+
+ movu [r2 + FENC_STRIDE * 0], m0
+ movu [r2 + FENC_STRIDE * 2], m1
+ movu [r2 + FENC_STRIDE * 4], m2
+ movu [r2 + FENC_STRIDE * 6], m3
+
+ je .nextH
+ jmp .loopW
+
+.nextH:
+ lea r0, [r0 + r1 * 4]
+ add r2, FENC_STRIDE * 8
+
+ sub r4d, 4
+ jnz .loopH
+ RET
+%endmacro
+PIXEL_WH_8xN 8, 8
+PIXEL_WH_8xN 8, 4
+PIXEL_WH_8xN 8, 16
+PIXEL_WH_8xN 8, 32
+
+;-----------------------------------------------------------------------------
+; void pixelToShort(pixel *src, intptr_t srcStride, int16_t *dst)
+;-----------------------------------------------------------------------------
+%macro PIXEL_WH_16xN 2
+INIT_XMM sse2
+cglobal pixelToShort_%1x%2, 3, 7, 5
+
+ add r1, r1
+
+ ; width (%1) and height (%2) are assemble-time macro constants (not r3m/r4m); r3d/r4d could be immediates
+ mov r3d, %1
+ mov r4d, %2
+
+ ; load constant
+ mova m4, [tab_c_n8192]
+
+.loopH:
+
+ xor r5d, r5d
+.loopW:
+ lea r6, [r0 + r5 * 2]
+
+ movu m0, [r6]
+ psllw m0, 4
+ paddw m0, m4
+
+ movu m1, [r6 + r1]
+ psllw m1, 4
+ paddw m1, m4
+
+ movu m2, [r6 + r1 * 2]
+ psllw m2, 4
+ paddw m2, m4
+
+ lea r6, [r6 + r1 * 2]
+ movu m3, [r6 + r1]
+ psllw m3, 4
+ paddw m3, m4
+
+ add r5, 8
+ cmp r5, r3
+
+ movu [r2 + r5 * 2 + FENC_STRIDE * 0 - 16], m0
+ movu [r2 + r5 * 2 + FENC_STRIDE * 2 - 16], m1
+ movu [r2 + r5 * 2 + FENC_STRIDE * 4 - 16], m2
+ movu [r2 + r5 * 2 + FENC_STRIDE * 6 - 16], m3
+ je .nextH
+ jmp .loopW
+
+.nextH:
+ lea r0, [r0 + r1 * 4]
+ add r2, FENC_STRIDE * 8
+
+ sub r4d, 4
+ jnz .loopH
+ RET
+%endmacro
+PIXEL_WH_16xN 16, 4
+PIXEL_WH_16xN 16, 8
+PIXEL_WH_16xN 16, 12
+PIXEL_WH_16xN 16, 16
+PIXEL_WH_16xN 16, 32
+PIXEL_WH_16xN 16, 64
+
+;-----------------------------------------------------------------------------
+; void pixelToShort(pixel *src, intptr_t srcStride, int16_t *dst)
+;-----------------------------------------------------------------------------
+%macro PIXEL_WH_32xN 2 ; NOTE(review): body is identical to PIXEL_WH_16xN (and near-identical to PIXEL_WH_8xN) — consider one shared macro instantiated per size
+INIT_XMM sse2
+cglobal pixelToShort_%1x%2, 3, 7, 5
+
+ add r1, r1
+
+ ; width (%1) and height (%2) are assemble-time macro constants (not r3m/r4m); r3d/r4d could be immediates
+ mov r3d, %1
+ mov r4d, %2
+
+ ; load constant
+ mova m4, [tab_c_n8192]
+
+.loopH:
+
+ xor r5d, r5d
+.loopW:
+ lea r6, [r0 + r5 * 2]
+
+ movu m0, [r6]
+ psllw m0, 4
+ paddw m0, m4
+
+ movu m1, [r6 + r1]
+ psllw m1, 4
+ paddw m1, m4
+
+ movu m2, [r6 + r1 * 2]
+ psllw m2, 4
+ paddw m2, m4
+
+ lea r6, [r6 + r1 * 2]
+ movu m3, [r6 + r1]
+ psllw m3, 4
+ paddw m3, m4
+
+ add r5, 8
+ cmp r5, r3
+
+ movu [r2 + r5 * 2 + FENC_STRIDE * 0 - 16], m0
+ movu [r2 + r5 * 2 + FENC_STRIDE * 2 - 16], m1
+ movu [r2 + r5 * 2 + FENC_STRIDE * 4 - 16], m2
+ movu [r2 + r5 * 2 + FENC_STRIDE * 6 - 16], m3
+
+ je .nextH
+ jmp .loopW
+
+.nextH:
+ lea r0, [r0 + r1 * 4]
+ add r2, FENC_STRIDE * 8
+
+ sub r4d, 4
+ jnz .loopH
+ RET
+%endmacro
+PIXEL_WH_32xN 32, 8
+PIXEL_WH_32xN 32, 16
+PIXEL_WH_32xN 32, 24
+PIXEL_WH_32xN 32, 32
+PIXEL_WH_32xN 32, 64
+
+;-----------------------------------------------------------------------------
+; void pixelToShort(pixel *src, intptr_t srcStride, int16_t *dst)
+;-----------------------------------------------------------------------------
+%macro PIXEL_WH_64xN 2
+INIT_XMM sse2
+cglobal pixelToShort_%1x%2, 3, 7, 5
+
+ add r1, r1
+
+ ; width (%1) and height (%2) are assemble-time macro constants (not r3m/r4m); r3d/r4d could be immediates
+ mov r3d, %1
+ mov r4d, %2
+
+ ; load constant
+ mova m4, [tab_c_n8192]
+
+.loopH:
+
+ xor r5d, r5d
+.loopW:
+ lea r6, [r0 + r5 * 2]
+
+ movu m0, [r6]
+ psllw m0, 4
+ paddw m0, m4
+
+ movu m1, [r6 + r1]
+ psllw m1, 4
+ paddw m1, m4
+
+ movu m2, [r6 + r1 * 2]
+ psllw m2, 4
+ paddw m2, m4
+
+ lea r6, [r6 + r1 * 2]
+ movu m3, [r6 + r1]
+ psllw m3, 4
+ paddw m3, m4
+
+ add r5, 8
+ cmp r5, r3
+ jg .width4 ; NOTE(review): never taken — r5 steps by 8 from 8 up to r3 (= 64) and cannot exceed it, so the .width4 tail below is dead code inherited from the variable-width luma_p2s; remove it
+ movu [r2 + r5 * 2 + FENC_STRIDE * 0 - 16], m0
+ movu [r2 + r5 * 2 + FENC_STRIDE * 2 - 16], m1
+ movu [r2 + r5 * 2 + FENC_STRIDE * 4 - 16], m2
+ movu [r2 + r5 * 2 + FENC_STRIDE * 6 - 16], m3
+ je .nextH
+ jmp .loopW
+
+.width4:
+ movh [r2 + r5 * 2 + FENC_STRIDE * 0 - 16], m0
+ movh [r2 + r5 * 2 + FENC_STRIDE * 2 - 16], m1
+ movh [r2 + r5 * 2 + FENC_STRIDE * 4 - 16], m2
+ movh [r2 + r5 * 2 + FENC_STRIDE * 6 - 16], m3
+
+.nextH:
+ lea r0, [r0 + r1 * 4]
+ add r2, FENC_STRIDE * 8
+
+ sub r4d, 4
+ jnz .loopH
+ RET
+%endmacro
+PIXEL_WH_64xN 64, 16
+PIXEL_WH_64xN 64, 32
+PIXEL_WH_64xN 64, 48
+PIXEL_WH_64xN 64, 64
diff -r 70be3fa2ee55 -r b2897cf788ec source/common/x86/ipfilter8.h
--- a/source/common/x86/ipfilter8.h Mon Mar 02 10:52:59 2015 +0530
+++ b/source/common/x86/ipfilter8.h Mon Mar 02 19:54:46 2015 +0530
@@ -290,7 +290,28 @@
SETUP_CHROMA_420_HORIZ_FUNC_DEF(16, 64, cpu)
void x265_chroma_p2s_sse2(const pixel* src, intptr_t srcStride, int16_t* dst, int width, int height);
-void x265_luma_p2s_sse2(const pixel* src, intptr_t srcStride, int16_t* dst, int width, int height);
+void x265_pixelToShort_4x4_sse2(const pixel* src, intptr_t srcStride, int16_t* dst);
+void x265_pixelToShort_4x8_sse2(const pixel* src, intptr_t srcStride, int16_t* dst);
+void x265_pixelToShort_4x16_sse2(const pixel* src, intptr_t srcStride, int16_t* dst);
+void x265_pixelToShort_8x4_sse2(const pixel* src, intptr_t srcStride, int16_t* dst);
+void x265_pixelToShort_8x8_sse2(const pixel* src, intptr_t srcStride, int16_t* dst);
+void x265_pixelToShort_8x16_sse2(const pixel* src, intptr_t srcStride, int16_t* dst);
+void x265_pixelToShort_8x32_sse2(const pixel* src, intptr_t srcStride, int16_t* dst);
+void x265_pixelToShort_16x4_sse2(const pixel* src, intptr_t srcStride, int16_t* dst);
+void x265_pixelToShort_16x8_sse2(const pixel* src, intptr_t srcStride, int16_t* dst);
+void x265_pixelToShort_16x12_sse2(const pixel* src, intptr_t srcStride, int16_t* dst);
+void x265_pixelToShort_16x16_sse2(const pixel* src, intptr_t srcStride, int16_t* dst);
+void x265_pixelToShort_16x32_sse2(const pixel* src, intptr_t srcStride, int16_t* dst);
+void x265_pixelToShort_16x64_sse2(const pixel* src, intptr_t srcStride, int16_t* dst);
+void x265_pixelToShort_32x8_sse2(const pixel* src, intptr_t srcStride, int16_t* dst);
+void x265_pixelToShort_32x16_sse2(const pixel* src, intptr_t srcStride, int16_t* dst);
+void x265_pixelToShort_32x24_sse2(const pixel* src, intptr_t srcStride, int16_t* dst);
+void x265_pixelToShort_32x32_sse2(const pixel* src, intptr_t srcStride, int16_t* dst);
+void x265_pixelToShort_32x64_sse2(const pixel* src, intptr_t srcStride, int16_t* dst);
+void x265_pixelToShort_64x16_sse2(const pixel* src, intptr_t srcStride, int16_t* dst);
+void x265_pixelToShort_64x32_sse2(const pixel* src, intptr_t srcStride, int16_t* dst);
+void x265_pixelToShort_64x48_sse2(const pixel* src, intptr_t srcStride, int16_t* dst);
+void x265_pixelToShort_64x64_sse2(const pixel* src, intptr_t srcStride, int16_t* dst);
CHROMA_420_VERT_FILTERS(_sse2);
CHROMA_420_HORIZ_FILTERS(_sse4);
More information about the x265-devel
mailing list