[x265] [PATCH 4 of 9] asm-intra_pred_ang16_33: improve ~16% over SSE4 (new AVX2 primitive)
praveen at multicorewareinc.com
praveen at multicorewareinc.com
Tue Mar 17 06:11:05 CET 2015
# HG changeset patch
# User Praveen Tiwari <praveen at multicorewareinc.com>
# Date 1426514115 -19800
# Node ID 9f7f64adb536daedb6ba740649c4f55e8f63cd99
# Parent b74b3d65a1092102e92cf7f9383b48c825abad3c
asm-intra_pred_ang16_33: improve ~16% over SSE4 (new AVX2 primitive)

Benchmark (speedup vs C, cycles avx/sse, cycles C):
AVX2:
intra_ang_16x16[33]  10.35x   781.74   8092.85
SSE4:
intra_ang_16x16[33]   8.65x   937.89   8116.52
diff -r b74b3d65a109 -r 9f7f64adb536 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp Mon Mar 16 19:11:34 2015 +0530
+++ b/source/common/x86/asm-primitives.cpp Mon Mar 16 19:25:15 2015 +0530
@@ -1515,6 +1515,7 @@
p.cu[BLOCK_16x16].intra_pred[30] = x265_intra_pred_ang16_30_avx2;
p.cu[BLOCK_16x16].intra_pred[31] = x265_intra_pred_ang16_31_avx2;
p.cu[BLOCK_16x16].intra_pred[32] = x265_intra_pred_ang16_32_avx2;
+ p.cu[BLOCK_16x16].intra_pred[33] = x265_intra_pred_ang16_33_avx2;
// copy_sp primitives
p.cu[BLOCK_16x16].copy_sp = x265_blockcopy_sp_16x16_avx2;
diff -r b74b3d65a109 -r 9f7f64adb536 source/common/x86/intrapred.h
--- a/source/common/x86/intrapred.h Mon Mar 16 19:11:34 2015 +0530
+++ b/source/common/x86/intrapred.h Mon Mar 16 19:25:15 2015 +0530
@@ -190,6 +190,7 @@
void x265_intra_pred_ang16_30_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
void x265_intra_pred_ang16_31_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
void x265_intra_pred_ang16_32_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
+void x265_intra_pred_ang16_33_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
void x265_all_angs_pred_4x4_sse4(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma);
void x265_all_angs_pred_8x8_sse4(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma);
void x265_all_angs_pred_16x16_sse4(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma);
diff -r b74b3d65a109 -r 9f7f64adb536 source/common/x86/intrapred8.asm
--- a/source/common/x86/intrapred8.asm Mon Mar 16 19:11:34 2015 +0530
+++ b/source/common/x86/intrapred8.asm Mon Mar 16 19:25:15 2015 +0530
@@ -199,6 +199,22 @@
db 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27
db 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16
+ALIGN 32
+c_ang16_mode_33: db 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26
+ db 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20
+ db 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14
+ db 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8
+ db 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28
+ db 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22
+ db 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16
+ db 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10
+ db 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30
+ db 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24
+ db 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18
+ db 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12
+ db 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6
+ db 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0
+
ALIGN 32
;; (blkSize - 1 - x)
pw_planar4_0: dw 3, 2, 1, 0, 3, 2, 1, 0
@@ -11227,3 +11243,172 @@
vpermq m3, m3, 11011000b
movu [r0 + r3], xm3
RET
+
+INIT_YMM avx2
+cglobal intra_pred_ang16_33, 3, 5, 6
+ mova m0, [pw_1024]
+ mova m5, [intra_pred_shuff_0_8]
+ lea r3, [3 * r1]
+ lea r4, [c_ang16_mode_33]
+
+ vbroadcasti128 m1, [r2 + 1]
+ pshufb m1, m5
+ vbroadcasti128 m2, [r2 + 9]
+ pshufb m2, m5
+
+ vperm2i128 m1, m1, m2, 00100000b
+ pmaddubsw m3, m1, [r4 + 0 * mmsize]
+ pmulhrsw m3, m0
+
+ vbroadcasti128 m1, [r2 + 2]
+ pshufb m1, m5
+ vbroadcasti128 m2, [r2 + 10]
+ pshufb m2, m5
+
+ vperm2i128 m1, m1, m2, 00100000b
+ pmaddubsw m4, m1, [r4 + 1 * mmsize]
+ pmulhrsw m4, m0
+ packuswb m3, m4
+ vpermq m3, m3, 11011000b
+ movu [r0], xm3
+ vextracti128 xm3, m3, 1
+ movu [r0 + r1], xm3
+
+ vbroadcasti128 m1, [r2 + 3]
+ pshufb m1, m5
+ vbroadcasti128 m2, [r2 + 11]
+ pshufb m2, m5
+
+ vperm2i128 m1, m1, m2, 00100000b
+ pmaddubsw m3, m1, [r4 + 2 * mmsize]
+ pmulhrsw m3, m0
+
+ vbroadcasti128 m1, [r2 + 4]
+ pshufb m1, m5
+ vbroadcasti128 m2, [r2 + 12]
+ pshufb m2, m5
+
+ vperm2i128 m1, m1, m2, 00100000b
+ pmaddubsw m4, m1, [r4 + 3 * mmsize]
+ pmulhrsw m4, m0
+ packuswb m3, m4
+ vpermq m3, m3, 11011000b
+ movu [r0 + 2 * r1], xm3
+ vextracti128 xm3, m3, 1
+ movu [r0 + r3], xm3
+
+ lea r0, [r0 + 4 * r1]
+ add r4, 4 * mmsize
+
+ vbroadcasti128 m1, [r2 + 5]
+ pshufb m1, m5
+ vbroadcasti128 m2, [r2 + 13]
+ pshufb m2, m5
+
+ INTRA_PRED_ANG16_MC0 r0, r0 + r1, 0
+
+ vbroadcasti128 m1, [r2 + 6]
+ pshufb m1, m5
+ vbroadcasti128 m2, [r2 + 14]
+ pshufb m2, m5
+
+ vperm2i128 m1, m1, m2, 00100000b
+ pmaddubsw m3, m1, [r4 + 1 * mmsize]
+ pmulhrsw m3, m0
+
+ vbroadcasti128 m1, [r2 + 7]
+ pshufb m1, m5
+ vbroadcasti128 m2, [r2 + 15]
+ pshufb m2, m5
+
+ vperm2i128 m1, m1, m2, 00100000b
+ pmaddubsw m4, m1, [r4 + 2 * mmsize]
+ pmulhrsw m4, m0
+ packuswb m3, m4
+ vpermq m3, m3, 11011000b
+ movu [r0 + 2 * r1], xm3
+ vextracti128 xm3, m3, 1
+ movu [r0 + r3], xm3
+
+ vbroadcasti128 m1, [r2 + 8]
+ pshufb m1, m5
+ vbroadcasti128 m2, [r2 + 16]
+ pshufb m2, m5
+
+ lea r0, [r0 + 4 * r1]
+ vperm2i128 m1, m1, m2, 00100000b
+ pmaddubsw m3, m1, [r4 + 3 * mmsize]
+ pmulhrsw m3, m0
+ packuswb m3, m3
+ vpermq m3, m3, 11011000b
+ movu [r0], xm3
+
+ vbroadcasti128 m1, [r2 + 9]
+ pshufb m1, m5
+ vbroadcasti128 m2, [r2 + 17]
+ pshufb m2, m5
+
+ add r4, 4 * mmsize
+ INTRA_PRED_ANG16_MC0 r0 + r1, r0 + 2 * r1, 0
+
+ vbroadcasti128 m1, [r2 + 10]
+ pshufb m1, m5
+ vbroadcasti128 m2, [r2 + 18]
+ pshufb m2, m5
+
+ vperm2i128 m1, m1, m2, 00100000b
+ pmaddubsw m3, m1, [r4 + 1 * mmsize]
+ pmulhrsw m3, m0
+
+ vbroadcasti128 m1, [r2 + 11]
+ pshufb m1, m5
+ vbroadcasti128 m2, [r2 + 19]
+ pshufb m2, m5
+
+ vperm2i128 m1, m1, m2, 00100000b
+ pmaddubsw m4, m1, [r4 + 2 * mmsize]
+ pmulhrsw m4, m0
+ packuswb m3, m4
+ vpermq m3, m3, 11011000b
+ movu [r0 + r3], xm3
+ vextracti128 xm3, m3, 1
+ movu [r0 + 4 * r1], xm3
+
+ lea r0, [r0 + 4 * r1]
+
+ vbroadcasti128 m1, [r2 + 12]
+ pshufb m1, m5
+ vbroadcasti128 m2, [r2 + 20]
+ pshufb m2, m5
+
+ vperm2i128 m1, m1, m2, 00100000b
+ pmaddubsw m3, m1, [r4 + 3 * mmsize]
+ pmulhrsw m3, m0
+
+ vbroadcasti128 m1, [r2 + 13]
+ pshufb m1, m5
+ vbroadcasti128 m2, [r2 + 21]
+ pshufb m2, m5
+
+ vperm2i128 m1, m1, m2, 00100000b
+ pmaddubsw m4, m1, [r4 + 4 * mmsize]
+ pmulhrsw m4, m0
+ packuswb m3, m4
+ vpermq m3, m3, 11011000b
+ movu [r0 + r1], xm3
+ vextracti128 xm3, m3, 1
+ movu [r0 + 2 * r1], xm3
+
+ add r4, 4 * mmsize
+ vbroadcasti128 m1, [r2 + 14]
+ pshufb m1, m5
+ vbroadcasti128 m2, [r2 + 22]
+ pshufb m2, m5
+
+ vperm2i128 m1, m1, m2, 00100000b
+ pmaddubsw m3, m1, [r4 + 1 * mmsize]
+ pmulhrsw m3, m0
+ packuswb m3, m3
+ vpermq m3, m3, 11011000b
+ movu [r0 + r3], xm3
+ RET
More information about the x265-devel
mailing list