[x265] [PATCH] asm: intra_pred_ang32_18 improved by ~45% over SSE4
praveen at multicorewareinc.com
Tue Apr 14 08:30:54 CEST 2015
# HG changeset patch
# User Praveen Tiwari <praveen at multicorewareinc.com>
# Date 1428992352 -19800
# Tue Apr 14 11:49:12 2015 +0530
# Node ID 8c31f8daf9a2bbb3408178685eee97d84ca045ff
# Parent 9a0818c97dc72b7974889fd34de073cdb4fde771
asm: intra_pred_ang32_18 improved by ~45% over SSE4
AVX2:
intra_ang_32x32[18]    33.10x    354.58    11737.10
SSE4:
intra_ang_32x32[18]    17.51x    650.80    11396.64
(columns: speedup over the C primitive, optimized cycles, C cycles)
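Mode 18 is the pure diagonal: row 0 is the top-left pixel plus the above row, and each following row is the previous one shifted right by one pixel, with the next left-neighbour pixel entering at column 0. As a reference for what the kernel computes, here is a minimal scalar sketch (my illustration, not x265 source; it assumes x265's 32x32 neighbour layout with srcPix[0..64] = top-left plus above row and srcPix[65..128] = left column):

    #include <cstdint>

    // Scalar model of 32x32 angular mode 18 (illustration only).
    static void ang32_18_ref(uint8_t* dst, intptr_t dstStride, const uint8_t* srcPix)
    {
        for (int y = 0; y < 32; y++)
            for (int x = 0; x < 32; x++)
            {
                int off = x - y;                  // position along the diagonal
                dst[y * dstStride + x] = (off >= 0)
                    ? srcPix[off]                 // top-left / above neighbours
                    : srcPix[64 - off];           // left neighbours
            }
    }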
diff -r 9a0818c97dc7 -r 8c31f8daf9a2 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp Tue Apr 14 13:41:40 2015 +0800
+++ b/source/common/x86/asm-primitives.cpp Tue Apr 14 11:49:12 2015 +0530
@@ -1821,6 +1821,7 @@
p.cu[BLOCK_32x32].intra_pred[23] = x265_intra_pred_ang32_23_avx2;
p.cu[BLOCK_32x32].intra_pred[22] = x265_intra_pred_ang32_22_avx2;
p.cu[BLOCK_32x32].intra_pred[21] = x265_intra_pred_ang32_21_avx2;
+ p.cu[BLOCK_32x32].intra_pred[18] = x265_intra_pred_ang32_18_avx2;
// copy_sp primitives
p.cu[BLOCK_16x16].copy_sp = x265_blockcopy_sp_16x16_avx2;
diff -r 9a0818c97dc7 -r 8c31f8daf9a2 source/common/x86/intrapred.h
--- a/source/common/x86/intrapred.h Tue Apr 14 13:41:40 2015 +0800
+++ b/source/common/x86/intrapred.h Tue Apr 14 11:49:12 2015 +0530
@@ -277,6 +277,7 @@
void x265_intra_pred_ang32_23_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
void x265_intra_pred_ang32_22_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
void x265_intra_pred_ang32_21_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
+void x265_intra_pred_ang32_18_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
void x265_all_angs_pred_4x4_sse2(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma);
void x265_all_angs_pred_4x4_sse4(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma);
void x265_all_angs_pred_8x8_sse4(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma);
diff -r 9a0818c97dc7 -r 8c31f8daf9a2 source/common/x86/intrapred8.asm
--- a/source/common/x86/intrapred8.asm Tue Apr 14 13:41:40 2015 +0800
+++ b/source/common/x86/intrapred8.asm Tue Apr 14 11:49:12 2015 +0530
@@ -28,6 +28,7 @@
SECTION_RODATA 32
intra_pred_shuff_0_8: times 2 db 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8
+intra_pred_shuff_15_0: times 2 db 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
pb_0_8: times 8 db 0, 8
pb_unpackbw1: times 2 db 1, 8, 2, 8, 3, 8, 4, 8
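The new intra_pred_shuff_15_0 mask drives vpshufb, which picks bytes within each 128-bit lane; a mask of 15,14,...,0 therefore reverses the 16 bytes of a lane. The kernel uses it to flip the left-neighbour column (stored nearest-first in srcPix) into raster order before shifting those pixels in with palignr. A per-lane model of the instruction (illustration, not x265 code):

    #include <cstdint>

    // vpshufb, one 16-byte lane: each output byte selects a source byte
    // by index; the high mask bit forces the output byte to zero.
    static void pshufb_lane(const uint8_t src[16], const uint8_t mask[16], uint8_t out[16])
    {
        for (int i = 0; i < 16; i++)
            out[i] = (mask[i] & 0x80) ? 0 : src[mask[i] & 15];
    }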
@@ -10366,6 +10367,99 @@
RET
+INIT_YMM avx2
+cglobal intra_pred_ang32_18, 4, 4, 3
+ movu m0, [r2] ; row 0: top-left + 31 above neighbours
+ movu xm1, [r2 + 1 + 64] ; first 16 left neighbours, nearest first
+ pshufb xm1, [intra_pred_shuff_15_0] ; byte-reverse into raster order
+ mova xm2, xm0
+ vinserti128 m1, m1, xm2, 1 ; pixels preceding each lane of m0
+
+ lea r3, [r1 * 3]
+
+ movu [r0], m0
+ palignr m2, m0, m1, 15 ; row 1 = row 0 shifted right by one pixel
+ movu [r0 + r1], m2
+ palignr m2, m0, m1, 14
+ movu [r0 + r1 * 2], m2
+ palignr m2, m0, m1, 13
+ movu [r0 + r3], m2
+
+ lea r0, [r0 + r1 * 4]
+ palignr m2, m0, m1, 12
+ movu [r0], m2
+ palignr m2, m0, m1, 11
+ movu [r0 + r1], m2
+ palignr m2, m0, m1, 10
+ movu [r0 + r1 * 2], m2
+ palignr m2, m0, m1, 9
+ movu [r0 + r3], m2
+
+ lea r0, [r0 + r1 * 4]
+ palignr m2, m0, m1, 8
+ movu [r0], m2
+ palignr m2, m0, m1, 7
+ movu [r0 + r1], m2
+ palignr m2, m0, m1, 6
+ movu [r0 + r1 * 2], m2
+ palignr m2, m0, m1, 5
+ movu [r0 + r3], m2
+
+ lea r0, [r0 + r1 * 4]
+ palignr m2, m0, m1, 4
+ movu [r0], m2
+ palignr m2, m0, m1, 3
+ movu [r0 + r1], m2
+ palignr m2, m0, m1, 2
+ movu [r0 + r1 * 2], m2
+ palignr m2, m0, m1, 1
+ movu [r0 + r3], m2
+
+ lea r0, [r0 + r1 * 4]
+ movu [r0], m1
+
+ movu xm0, [r2 + 64 + 17] ; next 16 left neighbours
+ pshufb xm0, [intra_pred_shuff_15_0] ; byte-reverse into raster order
+ vinserti128 m0, m0, xm1, 1 ; pixels preceding each lane of m1
+
+ palignr m2, m1, m0, 15
+ movu [r0 + r1], m2
+ palignr m2, m1, m0, 14
+ movu [r0 + r1 * 2], m2
+ palignr m2, m1, m0, 13
+ movu [r0 + r3], m2
+
+ lea r0, [r0 + r1 * 4]
+ palignr m2, m1, m0, 12
+ movu [r0], m2
+ palignr m2, m1, m0, 11
+ movu [r0 + r1], m2
+ palignr m2, m1, m0, 10
+ movu [r0 + r1 * 2], m2
+ palignr m2, m1, m0, 9
+ movu [r0 + r3], m2
+
+ lea r0, [r0 + r1 * 4]
+ palignr m2, m1, m0, 8
+ movu [r0], m2
+ palignr m2, m1, m0, 7
+ movu [r0 + r1], m2
+ palignr m2, m1, m0, 6
+ movu [r0 + r1 * 2], m2
+ palignr m2, m1, m0, 5
+ movu [r0 + r3], m2
+
+ lea r0, [r0 + r1 * 4]
+ palignr m2, m1, m0, 4
+ movu [r0], m2
+ palignr m2, m1, m0, 3
+ movu [r0 + r1], m2
+ palignr m2, m1, m0, 2
+ movu [r0 + r1 * 2], m2
+ palignr m2, m1, m0, 1
+ movu [r0 + r3], m2
+ RET
+
INIT_XMM sse4
cglobal intra_pred_ang32_18, 4,5,5
movu m0, [r2] ; [15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0]
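All 32 rows of the new kernel come from vpalignr, which per 128-bit lane extracts 16 bytes at a byte offset from the concatenation of two registers. With m0 holding the current reference row and m1 holding, in each lane, the 16 pixels that precede that lane, palignr with immediate n produces the row shifted right by 16 - n pixels; counting n down from 15 to 1 walks the diagonal one pixel per row, so one register pair covers 16 rows before the references are rebuilt. A rough per-lane model (my sketch, not the patch itself):

    #include <cstdint>
    #include <cstring>

    // Model of vpalignr(hi, lo, n) for one 128-bit lane: bytes n..n+15
    // of the 32-byte concatenation {hi:lo}, with lo in the low half.
    static void palignr_lane(const uint8_t hi[16], const uint8_t lo[16],
                             int n, uint8_t out[16])
    {
        uint8_t cat[32];
        std::memcpy(cat, lo, 16);
        std::memcpy(cat + 16, hi, 16);
        std::memcpy(out, cat + n, 16);
    }

The vinserti128 setup is what makes the per-lane behaviour line up: the low lane of m1 gets the reversed left neighbours (which precede row 0's low half) and the high lane gets srcPix[0..15] (which precede row 0's high half), so both 16-pixel halves of a 32-pixel row shift together.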