[x265] [PATCH 5 of 6] asm: intra_pred_ang32_31 improved by ~54% over SSE4
praveen at multicorewareinc.com
Wed Mar 25 14:57:57 CET 2015
# HG changeset patch
# User Praveen Tiwari <praveen at multicorewareinc.com>
# Date 1427281665 -19800
# Wed Mar 25 16:37:45 2015 +0530
# Node ID 69a891142c51e06304d5d7ebe49c1e8c3e41b1ed
# Parent be44d5754e7759dddbbeaf63a52fab99b14bc0ab
asm: intra_pred_ang32_31 improved by ~54% over SSE4
AVX2:
intra_ang_32x32[31]     16.42x   1810.50   29723.12
SSE4:
intra_ang_32x32[31]      7.66x   4017.17   30782.31
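(The last two columns are average cycle counts for the optimized and C
primitives; the multiplier is their ratio. The headline number follows
directly: 1 - 1810.50 / 4017.17 ~= 0.549, i.e. the AVX2 kernel needs ~54%
fewer cycles than the SSE4 one.)

For reviewers, the (32 - fract, fract) byte pairs in the new
c_ang32_mode_31 table below can be regenerated with a small scalar sketch
(illustrative only, not part of the patch; mode 31 has intraPredAngle = 17
per the HEVC spec):

    #include <stdio.h>

    int main(void)
    {
        const int angle = 17;               /* intraPredAngle for mode 31 */
        for (int row = 0; row < 32; row++)
        {
            int fract = ((row + 1) * angle) & 31;
            /* predicted pixel: (ref[i]*(32 - fract) + ref[i+1]*fract + 16) >> 5 */
            printf("row %2d: db %2d, %2d, ...\n", row, 32 - fract, fract);
        }
        return 0;
    }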
diff -r be44d5754e77 -r 69a891142c51 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp Wed Mar 25 16:33:08 2015 +0530
+++ b/source/common/x86/asm-primitives.cpp Wed Mar 25 16:37:45 2015 +0530
@@ -1605,6 +1605,7 @@
p.cu[BLOCK_32x32].intra_pred[28] = x265_intra_pred_ang32_28_avx2;
p.cu[BLOCK_32x32].intra_pred[29] = x265_intra_pred_ang32_29_avx2;
p.cu[BLOCK_32x32].intra_pred[30] = x265_intra_pred_ang32_30_avx2;
+ p.cu[BLOCK_32x32].intra_pred[31] = x265_intra_pred_ang32_31_avx2;
// copy_sp primitives
p.cu[BLOCK_16x16].copy_sp = x265_blockcopy_sp_16x16_avx2;
diff -r be44d5754e77 -r 69a891142c51 source/common/x86/intrapred.h
--- a/source/common/x86/intrapred.h Wed Mar 25 16:33:08 2015 +0530
+++ b/source/common/x86/intrapred.h Wed Mar 25 16:37:45 2015 +0530
@@ -210,6 +210,7 @@
void x265_intra_pred_ang32_28_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
void x265_intra_pred_ang32_29_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
void x265_intra_pred_ang32_30_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
+void x265_intra_pred_ang32_31_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
void x265_all_angs_pred_4x4_sse4(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma);
void x265_all_angs_pred_8x8_sse4(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma);
void x265_all_angs_pred_16x16_sse4(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma);
diff -r be44d5754e77 -r 69a891142c51 source/common/x86/intrapred8.asm
--- a/source/common/x86/intrapred8.asm Wed Mar 25 16:33:08 2015 +0530
+++ b/source/common/x86/intrapred8.asm Wed Mar 25 16:37:45 2015 +0530
@@ -330,6 +330,27 @@
db 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19
db 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0
+
+ALIGN 32
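+; weight table for intra angular mode 31 (intraPredAngle = 17): byte pairs
+; (32 - fract, fract) with fract = ((row + 1) * 17) & 31; rows handled two
+; at a time share a line (low/high 16 bytes), single rows get a full line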
+c_ang32_mode_31: db 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17
+ db 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19
+ db 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21
+ db 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23
+ db 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25
+ db 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27
+ db 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29
+ db 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31
+ db 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16
+ db 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18
+ db 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20
+ db 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22
+ db 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24
+ db 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26
+ db 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28
+ db 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30
+ db 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15
+ db 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0
+
ALIGN 32
;; (blkSize - 1 - x)
pw_planar4_0: dw 3, 2, 1, 0, 3, 2, 1, 0
@@ -12708,5 +12729,332 @@
vpermq m6, m6, 11011000b
movu [r0 + r3], m6
RET
+
+INIT_YMM avx2
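+; void intra_pred_ang32_31(pixel* dst, intptr_t dstStride, const pixel* srcPix,
+;                          int dirMode, int bFilter)
+; per pair of output rows: pshufb interleaves adjacent reference pixels,
+; pmaddubsw applies the (32 - fract, fract) weights from c_ang32_mode_31,
+; and pmulhrsw by pw_1024 performs the (x + 16) >> 5 normalization;
+; INTRA_PRED_ANG32_CAL_ROW (defined earlier in this file) leaves the two
+; finished rows in m7 and m6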
+cglobal intra_pred_ang32_31, 3, 5, 11
+ mova m0, [pw_1024]
+ mova m1, [intra_pred_shuff_0_8]
+ lea r3, [3 * r1]
+ lea r4, [c_ang32_mode_31]
+
+ ;row [0]
+ vbroadcasti128 m2, [r2 + 1]
+ pshufb m2, m1
+ vbroadcasti128 m3, [r2 + 9]
+ pshufb m3, m1
+ vbroadcasti128 m4, [r2 + 17]
+ pshufb m4, m1
+ vbroadcasti128 m5, [r2 + 25]
+ pshufb m5, m1
+
+ mova m10, [r4 + 0 * mmsize]
+ vperm2i128 m6, m2, m3, 00100000b
+ pmaddubsw m6, m10
+ pmulhrsw m6, m0
+ vperm2i128 m7, m4, m5, 00100000b
+ pmaddubsw m7, m10
+ pmulhrsw m7, m0
+ packuswb m6, m7
+ vpermq m6, m6, 11011000b
+ movu [r0], m6
+
+ ;row [1, 2]
+ vbroadcasti128 m2, [r2 + 2]
+ pshufb m2, m1
+ vbroadcasti128 m3, [r2 + 10]
+ pshufb m3, m1
+ vbroadcasti128 m4, [r2 + 18]
+ pshufb m4, m1
+ vbroadcasti128 m5, [r2 + 26]
+ pshufb m5, m1
+
+ mova m10, [r4 + 1 * mmsize]
+
+ INTRA_PRED_ANG32_CAL_ROW
+ movu [r0 + r1], m7
+ movu [r0 + 2 * r1], m6
+
+ ;row [3, 4]
+ vbroadcasti128 m2, [r2 + 3]
+ pshufb m2, m1
+ vbroadcasti128 m3, [r2 + 11]
+ pshufb m3, m1
+ vbroadcasti128 m4, [r2 + 19]
+ pshufb m4, m1
+ vbroadcasti128 m5, [r2 + 27]
+ pshufb m5, m1
+
+ mova m10, [r4 + 2 * mmsize]
+
+ INTRA_PRED_ANG32_CAL_ROW
+ movu [r0 + r3], m7
+ lea r0, [r0 + 4 * r1]
+ movu [r0], m6
+
+ ;row [5, 6]
+ vbroadcasti128 m2, [r2 + 4]
+ pshufb m2, m1
+ vbroadcasti128 m3, [r2 + 12]
+ pshufb m3, m1
+ vbroadcasti128 m4, [r2 + 20]
+ pshufb m4, m1
+ vbroadcasti128 m5, [r2 + 28]
+ pshufb m5, m1
+
+ mova m10, [r4 + 3 * mmsize]
+
+ INTRA_PRED_ANG32_CAL_ROW
+ movu [r0 + r1], m7
+ movu [r0 + 2 * r1], m6
+
+ ;row [7, 8]
+ vbroadcasti128 m2, [r2 + 5]
+ pshufb m2, m1
+ vbroadcasti128 m3, [r2 + 13]
+ pshufb m3, m1
+ vbroadcasti128 m4, [r2 + 21]
+ pshufb m4, m1
+ vbroadcasti128 m5, [r2 + 29]
+ pshufb m5, m1
+
+ add r4, 4 * mmsize
+ mova m10, [r4 + 0 * mmsize]
+
+ INTRA_PRED_ANG32_CAL_ROW
+ movu [r0 + r3], m7
+ lea r0, [r0 + 4 * r1]
+ movu [r0], m6
+
+ ;row [9, 10]
+ vbroadcasti128 m2, [r2 + 6]
+ pshufb m2, m1
+ vbroadcasti128 m3, [r2 + 14]
+ pshufb m3, m1
+ vbroadcasti128 m4, [r2 + 22]
+ pshufb m4, m1
+ vbroadcasti128 m5, [r2 + 30]
+ pshufb m5, m1
+
+ mova m10, [r4 + 1 * mmsize]
+
+ INTRA_PRED_ANG32_CAL_ROW
+ movu [r0 + r1], m7
+ movu [r0 + 2 * r1], m6
+
+ ;row [11, 12]
+ vbroadcasti128 m2, [r2 + 7]
+ pshufb m2, m1
+ vbroadcasti128 m3, [r2 + 15]
+ pshufb m3, m1
+ vbroadcasti128 m4, [r2 + 23]
+ pshufb m4, m1
+ vbroadcasti128 m5, [r2 + 31]
+ pshufb m5, m1
+
+ mova m10, [r4 + 2 * mmsize]
+
+ INTRA_PRED_ANG32_CAL_ROW
+ movu [r0 + r3], m7
+ lea r0, [r0 + 4 * r1]
+ movu [r0], m6
+
+ ;row [13, 14]
+ vbroadcasti128 m2, [r2 + 8]
+ pshufb m2, m1
+ vbroadcasti128 m3, [r2 + 16]
+ pshufb m3, m1
+ vbroadcasti128 m4, [r2 + 24]
+ pshufb m4, m1
+ vbroadcasti128 m5, [r2 + 32]
+ pshufb m5, m1
+
+ mova m10, [r4 + 3 * mmsize]
+
+ INTRA_PRED_ANG32_CAL_ROW
+ movu [r0 + r1], m7
+ movu [r0 + 2 * r1], m6
+
+ ;row [15]
+ vbroadcasti128 m2, [r2 + 9]
+ pshufb m2, m1
+ vbroadcasti128 m3, [r2 + 17]
+ pshufb m3, m1
+ vbroadcasti128 m4, [r2 + 25]
+ pshufb m4, m1
+ vbroadcasti128 m5, [r2 + 33]
+ pshufb m5, m1
+
+ add r4, 4 * mmsize
+ mova m10, [r4 + 0 * mmsize]
+ vperm2i128 m6, m2, m3, 00100000b
+ pmaddubsw m6, m10
+ pmulhrsw m6, m0
+ vperm2i128 m7, m4, m5, 00100000b
+ pmaddubsw m7, m10
+ pmulhrsw m7, m0
+ packuswb m6, m7
+ vpermq m6, m6, 11011000b
+ movu [r0 + r3], m6
+
+ ;row [16, 17]
+ vbroadcasti128 m2, [r2 + 10]
+ pshufb m2, m1
+ vbroadcasti128 m3, [r2 + 18]
+ pshufb m3, m1
+ vbroadcasti128 m4, [r2 + 26]
+ pshufb m4, m1
+ vbroadcasti128 m5, [r2 + 34]
+ pshufb m5, m1
+
+ lea r0, [r0 + 4 * r1]
+ mova m10, [r4 + 1 * mmsize]
+
+ INTRA_PRED_ANG32_CAL_ROW
+ movu [r0], m7
+ movu [r0 + r1], m6
+
+ ;row [18, 19]
+ vbroadcasti128 m2, [r2 + 11]
+ pshufb m2, m1
+ vbroadcasti128 m3, [r2 + 19]
+ pshufb m3, m1
+ vbroadcasti128 m4, [r2 + 27]
+ pshufb m4, m1
+ vbroadcasti128 m5, [r2 + 35]
+ pshufb m5, m1
+
+ mova m10, [r4 + 2 * mmsize]
+
+ INTRA_PRED_ANG32_CAL_ROW
+ movu [r0 + 2 * r1], m7
+ movu [r0 + r3], m6
+
+ ;row [20, 21]
+ vbroadcasti128 m2, [r2 + 12]
+ pshufb m2, m1
+ vbroadcasti128 m3, [r2 + 20]
+ pshufb m3, m1
+ vbroadcasti128 m4, [r2 + 28]
+ pshufb m4, m1
+ vbroadcasti128 m5, [r2 + 36]
+ pshufb m5, m1
+
+ mova m10, [r4 + 3 * mmsize]
+ lea r0, [r0 + 4 * r1]
+
+ INTRA_PRED_ANG32_CAL_ROW
+ movu [r0], m7
+ movu [r0 + r1], m6
+
+ ;row [22, 23]
+ vbroadcasti128 m2, [r2 + 13]
+ pshufb m2, m1
+ vbroadcasti128 m3, [r2 + 21]
+ pshufb m3, m1
+ vbroadcasti128 m4, [r2 + 29]
+ pshufb m4, m1
+ vbroadcasti128 m5, [r2 + 37]
+ pshufb m5, m1
+
+ add r4, 4 * mmsize
+ mova m10, [r4 + 0 * mmsize]
+
+ INTRA_PRED_ANG32_CAL_ROW
+ movu [r0 + 2 * r1], m7
+ movu [r0 + r3], m6
+
+ ;row [24, 25]
+ vbroadcasti128 m2, [r2 + 14]
+ pshufb m2, m1
+ vbroadcasti128 m3, [r2 + 22]
+ pshufb m3, m1
+ vbroadcasti128 m4, [r2 + 30]
+ pshufb m4, m1
+ vbroadcasti128 m5, [r2 + 38]
+ pshufb m5, m1
+
+ mova m10, [r4 + 1 * mmsize]
+ lea r0, [r0 + 4 * r1]
+
+ INTRA_PRED_ANG32_CAL_ROW
+ movu [r0], m7
+ movu [r0 + r1], m6
+
+ ;row [26, 27]
+ vbroadcasti128 m2, [r2 + 15]
+ pshufb m2, m1
+ vbroadcasti128 m3, [r2 + 23]
+ pshufb m3, m1
+ vbroadcasti128 m4, [r2 + 31]
+ pshufb m4, m1
+ vbroadcasti128 m5, [r2 + 39]
+ pshufb m5, m1
+
+ mova m10, [r4 + 2 * mmsize]
+
+ INTRA_PRED_ANG32_CAL_ROW
+ movu [r0 + 2 * r1], m7
+ movu [r0 + r3], m6
+
+ ;row [28, 29]
+ vbroadcasti128 m2, [r2 + 16]
+ pshufb m2, m1
+ vbroadcasti128 m3, [r2 + 24]
+ pshufb m3, m1
+ vbroadcasti128 m4, [r2 + 32]
+ pshufb m4, m1
+ vbroadcasti128 m5, [r2 + 40]
+ pshufb m5, m1
+
+ mova m10, [r4 + 3 * mmsize]
+ lea r0, [r0 + 4 * r1]
+
+ INTRA_PRED_ANG32_CAL_ROW
+ movu [r0], m7
+ movu [r0 + r1], m6
+
+ ;row [30]
+ vbroadcasti128 m2, [r2 + 17]
+ pshufb m2, m1
+ vbroadcasti128 m3, [r2 + 25]
+ pshufb m3, m1
+ vbroadcasti128 m4, [r2 + 33]
+ pshufb m4, m1
+ vbroadcasti128 m5, [r2 + 41]
+ pshufb m5, m1
+
+ add r4, 4 * mmsize
+ mova m10, [r4 + 0 * mmsize]
+ vperm2i128 m6, m2, m3, 00100000b
+ pmaddubsw m6, m10
+ pmulhrsw m6, m0
+ vperm2i128 m7, m4, m5, 00100000b
+ pmaddubsw m7, m10
+ pmulhrsw m7, m0
+ packuswb m6, m7
+ vpermq m6, m6, 11011000b
+ movu [r0 + 2 * r1], m6
+
+ ;row [31]
+ vbroadcasti128 m2, [r2 + 18]
+ pshufb m2, m1
+ vbroadcasti128 m3, [r2 + 26]
+ pshufb m3, m1
+ vbroadcasti128 m4, [r2 + 34]
+ pshufb m4, m1
+ vbroadcasti128 m5, [r2 + 42]
+ pshufb m5, m1
+
+ mova m10, [r4 + 1 * mmsize]
+ vperm2i128 m6, m2, m3, 00100000b
+ pmaddubsw m6, m10
+ pmulhrsw m6, m0
+ vperm2i128 m7, m4, m5, 00100000b
+ pmaddubsw m7, m10
+ pmulhrsw m7, m0
+ packuswb m6, m7
+ vpermq m6, m6, 11011000b
+ movu [r0 + r3], m6
+ RET
%endif
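A note on the rounding used throughout the kernel: pmaddubsw forms
ref[i]*(32 - fract) + ref[i+1]*fract as a signed word, and pmulhrsw
against pw_1024 computes round(x * 1024 / 2^15) = (x + 16) >> 5, which is
exactly the HEVC angular interpolation. A scalar model that checks this
identity over a sweep of inputs (illustrative C, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    /* one 16-bit lane of pmulhrsw: round((x * y) / 2^15) */
    static int16_t mulhrsw(int16_t x, int16_t y)
    {
        return (int16_t)((((int32_t)x * y >> 14) + 1) >> 1);
    }

    int main(void)
    {
        for (int a = 0; a <= 255; a++)            /* ref[i]   */
            for (int fract = 0; fract <= 32; fract++)
            {
                int b = (a * 37 + 11) & 255;      /* some ref[i+1] */
                /* pmaddubsw output for one (32 - fract, fract) pair */
                int16_t x = (int16_t)(a * (32 - fract) + b * fract);
                assert(mulhrsw(x, 1024) ==
                       ((a * (32 - fract) + b * fract + 16) >> 5));
            }
        return 0;
    }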