[x265] [PATCH 4 of 6] asm: avx2 code for intra_pred_ang16x16 mode 7, 8, 28 & 29
dnyaneshwar at multicorewareinc.com
Thu May 28 08:36:21 CEST 2015
# HG changeset patch
# User Dnyaneshwar G <dnyaneshwar at multicorewareinc.com>
# Date 1432792824 -19800
# Thu May 28 11:30:24 2015 +0530
# Node ID d5f08c2b2b765f64b10ca847b12bc7a715a528b7
# Parent 9b83bec91d48517119661cd5ee596115d6f17ae1
asm: avx2 code for intra_pred_ang16x16 modes 7, 8, 28 & 29
performance improvement over SSE:
intra_ang_16x16[ 7] 1851c->1206c, 35%
intra_ang_16x16[ 8] 1870c->1187c, 36%
intra_ang_16x16[28] 1422c->757c, 47%
intra_ang_16x16[29] 1415c->761c, 46%
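
Modes 7/29 and 8/28 are the horizontal/vertical mode pairs that share an
intra prediction angle (intraPredAngle = 9 and 5 respectively), which is
why each pair shares a single asm body below: the kernel computes the
vertical prediction, and a flag in r6d tells TRANSPOSE_STORE_AVX2 whether
to transpose the rows on store (horizontal modes 7/8) or store them
directly (vertical modes 28/29).

Each kernel evaluates the standard HEVC two-tap angular interpolation:
every predicted sample blends two adjacent reference samples with a 5-bit
fractional weight. The ang_table_avx2 rows hold {32 - frac, frac}
coefficient word pairs, so a single pmaddwd performs both multiplies and
the add for each output dword, followed by paddd with pd_16 and psrld by
5 for the rounding. A minimal scalar sketch of the same computation (a
hypothetical reference helper, not x265's C primitive; pixel is 16-bit as
in the high-bit-depth build this file serves):

    #include <cstdint>
    #include <cstddef>

    typedef uint16_t pixel;  // high-bit-depth build, as in intrapred16.asm

    // Scalar reference for a 16x16 angular prediction with a positive
    // angle: intraPredAngle = 9 for modes 7/29, 5 for modes 8/28.
    // Horizontal modes (7/8) would transpose dst afterwards.
    static void intraPredAng16_ref(pixel* dst, ptrdiff_t dstStride,
                                   const pixel* ref, int intraPredAngle)
    {
        for (int y = 0; y < 16; y++)
        {
            int pos  = (y + 1) * intraPredAngle;
            int off  = pos >> 5;  // whole-sample offset into the reference row
            int frac = pos & 31;  // 5-bit fraction -> {32 - frac, frac} weights

            for (int x = 0; x < 16; x++)
            {
                // pmaddwd + paddd [pd_16] + psrld 5 in the AVX2 kernels
                dst[y * dstStride + x] =
                    (pixel)(((32 - frac) * ref[x + off + 1]
                             + frac * ref[x + off + 2] + 16) >> 5);
            }
        }
    }

The AVX2 kernels hold sixteen 16-bit samples per ymm register, twice the
width of the SSE code, which is consistent with the cycle counts above.
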
diff -r 9b83bec91d48 -r d5f08c2b2b76 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp Thu May 28 11:26:09 2015 +0530
+++ b/source/common/x86/asm-primitives.cpp Thu May 28 11:30:24 2015 +0530
@@ -1201,6 +1201,10 @@
p.cu[BLOCK_16x16].intra_pred[4] = x265_intra_pred_ang16_4_avx2;
p.cu[BLOCK_16x16].intra_pred[5] = x265_intra_pred_ang16_5_avx2;
p.cu[BLOCK_16x16].intra_pred[6] = x265_intra_pred_ang16_6_avx2;
+ p.cu[BLOCK_16x16].intra_pred[7] = x265_intra_pred_ang16_7_avx2;
+ p.cu[BLOCK_16x16].intra_pred[8] = x265_intra_pred_ang16_8_avx2;
+ p.cu[BLOCK_16x16].intra_pred[28] = x265_intra_pred_ang16_28_avx2;
+ p.cu[BLOCK_16x16].intra_pred[29] = x265_intra_pred_ang16_29_avx2;
p.cu[BLOCK_16x16].intra_pred[30] = x265_intra_pred_ang16_30_avx2;
p.cu[BLOCK_16x16].intra_pred[31] = x265_intra_pred_ang16_31_avx2;
p.cu[BLOCK_16x16].intra_pred[32] = x265_intra_pred_ang16_32_avx2;
diff -r 9b83bec91d48 -r d5f08c2b2b76 source/common/x86/intrapred16.asm
--- a/source/common/x86/intrapred16.asm Thu May 28 11:26:09 2015 +0530
+++ b/source/common/x86/intrapred16.asm Thu May 28 11:30:24 2015 +0530
@@ -10962,6 +10962,320 @@
TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 11, 0, 1, 16
ret
+;; angle 16, modes 7 and 29
+cglobal ang16_mode_7_29
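+    ; Shared body: r6d = 0 (mode 7) -> TRANSPOSE_STORE_AVX2 transposes the
+    ; rows on store; r6d = 1 (mode 29) -> rows are stored directly.
+    ; intraPredAngle = 9 for this pair, so row y uses the 5-bit fraction
+    ; ((y + 1) * 9) & 31; the bracketed numbers on the coefficient loads
+    ; below are these fractions, i.e. the ang_table_avx2 row indices.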
+ test r6d, r6d
+
+ movu m0, [r2 + 2] ; [16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1]
+ movu m1, [r2 + 4] ; [17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2]
+
+ punpcklwd m3, m0, m1 ; [13 12 12 11 11 10 10 9 5 4 4 3 3 2 2 1]
+ punpckhwd m0, m1 ; [17 16 16 15 15 14 14 13 9 8 8 7 7 6 6 5]
+
+ movu m2, [r2 + 18] ; [24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9]
+ movu m4, [r2 + 20] ; [25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10]
+ punpcklwd m2, m4 ; [21 20 20 19 19 18 18 17 13 12 12 11 11 10 10 9]
+
+ pmaddwd m4, m3, [r3 - 8 * 32] ; [9]
+ paddd m4, [pd_16]
+ psrld m4, 5
+ pmaddwd m5, m0, [r3 - 8 * 32]
+ paddd m5, [pd_16]
+ psrld m5, 5
+ packusdw m4, m5
+
+ pmaddwd m5, m3, [r3 + 1 * 32] ; [18]
+ paddd m5, [pd_16]
+ psrld m5, 5
+ pmaddwd m8, m0, [r3 + 1 * 32]
+ paddd m8, [pd_16]
+ psrld m8, 5
+ packusdw m5, m8
+
+ pmaddwd m6, m3, [r3 + 10 * 32] ; [27]
+ paddd m6, [pd_16]
+ psrld m6, 5
+ pmaddwd m9, m0, [r3 + 10 * 32]
+ paddd m9, [pd_16]
+ psrld m9, 5
+ packusdw m6, m9
+
+ palignr m10, m0, m3, 4
+ pmaddwd m7, m10, [r3 - 13 * 32] ; [4]
+ paddd m7, [pd_16]
+ psrld m7, 5
+ palignr m11, m2, m0, 4
+ pmaddwd m8, m11, [r3 - 13 * 32]
+ paddd m8, [pd_16]
+ psrld m8, 5
+ packusdw m7, m8
+
+ pmaddwd m8, m10, [r3 - 4 * 32] ; [13]
+ paddd m8, [pd_16]
+ psrld m8, 5
+ pmaddwd m9, m11, [r3 - 4 * 32]
+ paddd m9, [pd_16]
+ psrld m9, 5
+ packusdw m8, m9
+
+ pmaddwd m9, m10, [r3 + 5 * 32] ; [22]
+ paddd m9, [pd_16]
+ psrld m9, 5
+ pmaddwd m12, m11, [r3 + 5 * 32]
+ paddd m12, [pd_16]
+ psrld m12, 5
+ packusdw m9, m12
+
+ pmaddwd m10, [r3 + 14 * 32] ; [31]
+ paddd m10, [pd_16]
+ psrld m10, 5
+ pmaddwd m11, [r3 + 14 * 32]
+ paddd m11, [pd_16]
+ psrld m11, 5
+ packusdw m10, m11
+
+ palignr m11, m0, m3, 8
+ pmaddwd m11, [r3 - 9 * 32] ; [8]
+ paddd m11, [pd_16]
+ psrld m11, 5
+ palignr m12, m2, m0, 8
+ pmaddwd m12, [r3 - 9 * 32]
+ paddd m12, [pd_16]
+ psrld m12, 5
+ packusdw m11, m12
+
+ TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 11, 12, 1, 0
+
+ palignr m5, m0, m3, 8
+ pmaddwd m4, m5, [r3] ; [17]
+ paddd m4, [pd_16]
+ psrld m4, 5
+ palignr m6, m2, m0, 8
+ pmaddwd m7, m6, [r3]
+ paddd m7, [pd_16]
+ psrld m7, 5
+ packusdw m4, m7
+
+ pmaddwd m5, [r3 + 9 * 32] ; [26]
+ paddd m5, [pd_16]
+ psrld m5, 5
+ pmaddwd m6, [r3 + 9 * 32]
+ paddd m6, [pd_16]
+ psrld m6, 5
+ packusdw m5, m6
+
+ palignr m9, m0, m3, 12
+ pmaddwd m6, m9, [r3 - 14 * 32] ; [3]
+ paddd m6, [pd_16]
+ psrld m6, 5
+ palignr m3, m2, m0, 12
+ pmaddwd m7, m3, [r3 - 14 * 32]
+ paddd m7, [pd_16]
+ psrld m7, 5
+ packusdw m6, m7
+
+ pmaddwd m7, m9, [r3 - 5 * 32] ; [12]
+ paddd m7, [pd_16]
+ psrld m7, 5
+ pmaddwd m8, m3, [r3 - 5 * 32]
+ paddd m8, [pd_16]
+ psrld m8, 5
+ packusdw m7, m8
+
+ pmaddwd m8, m9, [r3 + 4 * 32] ; [21]
+ paddd m8, [pd_16]
+ psrld m8, 5
+ pmaddwd m10, m3, [r3 + 4 * 32]
+ paddd m10, [pd_16]
+ psrld m10, 5
+ packusdw m8, m10
+
+ pmaddwd m9, [r3 + 13 * 32] ; [30]
+ paddd m9, [pd_16]
+ psrld m9, 5
+ pmaddwd m3, [r3 + 13 * 32]
+ paddd m3, [pd_16]
+ psrld m3, 5
+ packusdw m9, m3
+
+ pmaddwd m10, m0, [r3 - 10 * 32] ; [7]
+ paddd m10, [pd_16]
+ psrld m10, 5
+ pmaddwd m12, m2, [r3 - 10 * 32]
+ paddd m12, [pd_16]
+ psrld m12, 5
+ packusdw m10, m12
+
+ pmaddwd m0, [r3 - 1 * 32] ; [16]
+ paddd m0, [pd_16]
+ psrld m0, 5
+ pmaddwd m2, [r3 - 1 * 32]
+ paddd m2, [pd_16]
+ psrld m2, 5
+ packusdw m0, m2
+ TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 0, 1, 2, 16
+ ret
+
+;; angle 16, modes 8 and 28
+cglobal ang16_mode_8_28
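+    ; Same scheme as ang16_mode_7_29, but modes 8/28 use intraPredAngle = 5,
+    ; so the per-row fractions stepped through below are ((y + 1) * 5) & 31
+    ; (again shown as the bracketed ang_table_avx2 row indices).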
+ test r6d, r6d
+
+ movu m0, [r2 + 2] ; [16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1]
+ movu m1, [r2 + 4] ; [17 16 15 14 13 12 11 10 9 8 7 6 5 4 3 2]
+
+ punpcklwd m3, m0, m1 ; [13 12 12 11 11 10 10 9 5 4 4 3 3 2 2 1]
+ punpckhwd m0, m1 ; [17 16 16 15 15 14 14 13 9 8 8 7 7 6 6 5]
+
+ movu m2, [r2 + 18] ; [24 23 22 21 20 19 18 17 16 15 14 13 12 11 10 9]
+ movu m4, [r2 + 20] ; [25 24 23 22 21 20 19 18 17 16 15 14 13 12 11 10]
+ punpcklwd m2, m4 ; [21 20 20 19 19 18 18 17 13 12 12 11 11 10 10 9]
+
+ pmaddwd m4, m3, [r3 - 10 * 32] ; [5]
+ paddd m4, [pd_16]
+ psrld m4, 5
+ pmaddwd m5, m0, [r3 - 10 * 32]
+ paddd m5, [pd_16]
+ psrld m5, 5
+ packusdw m4, m5
+
+ pmaddwd m5, m3, [r3 - 5 * 32] ; [10]
+ paddd m5, [pd_16]
+ psrld m5, 5
+ pmaddwd m8, m0, [r3 - 5 * 32]
+ paddd m8, [pd_16]
+ psrld m8, 5
+ packusdw m5, m8
+
+ pmaddwd m6, m3, [r3] ; [15]
+ paddd m6, [pd_16]
+ psrld m6, 5
+ pmaddwd m9, m0, [r3]
+ paddd m9, [pd_16]
+ psrld m9, 5
+ packusdw m6, m9
+
+ pmaddwd m7, m3, [r3 + 5 * 32] ; [20]
+ paddd m7, [pd_16]
+ psrld m7, 5
+ pmaddwd m8, m0, [r3 + 5 * 32]
+ paddd m8, [pd_16]
+ psrld m8, 5
+ packusdw m7, m8
+
+ pmaddwd m8, m3, [r3 + 10 * 32] ; [25]
+ paddd m8, [pd_16]
+ psrld m8, 5
+ pmaddwd m9, m0, [r3 + 10 * 32]
+ paddd m9, [pd_16]
+ psrld m9, 5
+ packusdw m8, m9
+
+ pmaddwd m9, m3, [r3 + 15 * 32] ; [30]
+ paddd m9, [pd_16]
+ psrld m9, 5
+ pmaddwd m10, m0, [r3 + 15 * 32]
+ paddd m10, [pd_16]
+ psrld m10, 5
+ packusdw m9, m10
+
+ palignr m11, m0, m3, 4
+ pmaddwd m10, m11, [r3 - 12 * 32] ; [3]
+ paddd m10, [pd_16]
+ psrld m10, 5
+ palignr m1, m2, m0, 4
+ pmaddwd m12, m1, [r3 - 12 * 32]
+ paddd m12, [pd_16]
+ psrld m12, 5
+ packusdw m10, m12
+
+ pmaddwd m11, [r3 - 7 * 32] ; [8]
+ paddd m11, [pd_16]
+ psrld m11, 5
+ pmaddwd m1, [r3 - 7 * 32]
+ paddd m1, [pd_16]
+ psrld m1, 5
+ packusdw m11, m1
+
+ TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 11, 12, 1, 0
+
+ palignr m7, m0, m3, 4
+ pmaddwd m4, m7, [r3 - 2 * 32] ; [13]
+ paddd m4, [pd_16]
+ psrld m4, 5
+ palignr m1, m2, m0, 4
+ pmaddwd m5, m1, [r3 - 2 * 32]
+ paddd m5, [pd_16]
+ psrld m5, 5
+ packusdw m4, m5
+
+ pmaddwd m5, m7, [r3 + 3 * 32] ; [18]
+ paddd m5, [pd_16]
+ psrld m5, 5
+ pmaddwd m6, m1, [r3 + 3 * 32]
+ paddd m6, [pd_16]
+ psrld m6, 5
+ packusdw m5, m6
+
+ pmaddwd m6, m7, [r3 + 8 * 32] ; [23]
+ paddd m6, [pd_16]
+ psrld m6, 5
+ pmaddwd m8, m1, [r3 + 8 * 32]
+ paddd m8, [pd_16]
+ psrld m8, 5
+ packusdw m6, m8
+
+ pmaddwd m7, [r3 + 13 * 32] ; [28]
+ paddd m7, [pd_16]
+ psrld m7, 5
+ pmaddwd m1, [r3 + 13 * 32]
+ paddd m1, [pd_16]
+ psrld m1, 5
+ packusdw m7, m1
+
+ palignr m1, m0, m3, 8
+ pmaddwd m8, m1, [r3 - 14 * 32] ; [1]
+ paddd m8, [pd_16]
+ psrld m8, 5
+ palignr m2, m0, 8
+ pmaddwd m9, m2, [r3 - 14 * 32]
+ paddd m9, [pd_16]
+ psrld m9, 5
+ packusdw m8, m9
+
+ pmaddwd m9, m1, [r3 - 9 * 32] ; [6]
+ paddd m9, [pd_16]
+ psrld m9, 5
+ pmaddwd m3, m2, [r3 - 9 * 32]
+ paddd m3, [pd_16]
+ psrld m3, 5
+ packusdw m9, m3
+
+ pmaddwd m3, m1, [r3 - 4 * 32] ; [11]
+ paddd m3, [pd_16]
+ psrld m3, 5
+ pmaddwd m0, m2, [r3 - 4 * 32]
+ paddd m0, [pd_16]
+ psrld m0, 5
+ packusdw m3, m0
+
+ pmaddwd m1, [r3 + 1 * 32] ; [16]
+ paddd m1, [pd_16]
+ psrld m1, 5
+ pmaddwd m2, [r3 + 1 * 32]
+ paddd m2, [pd_16]
+ psrld m2, 5
+ packusdw m1, m2
+ TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 3, 1, 0, 2, 16
+ ret
+
cglobal intra_pred_ang16_3, 3,7,13
add r2, 64
xor r6d, r6d
@@ -11052,6 +11366,46 @@
call ang16_mode_6_30
RET
+cglobal intra_pred_ang16_7, 3,7,13
+    add r2, 64                          ; horizontal mode: use the left reference array
+    xor r6d, r6d                        ; r6d = 0 -> transposed store
+    lea r3, [ang_table_avx2 + 17 * 32]  ; [r3 + k * 32] = coefficients for fraction 17 + k
+    add r1d, r1d                        ; stride in bytes (16-bit pixels)
+    lea r4, [r1 * 3]
+
+    call ang16_mode_7_29
+    RET
+
+cglobal intra_pred_ang16_29, 3,7,13
+    xor r6d, r6d
+    inc r6d                             ; r6d = 1 -> direct store (vertical mode)
+    lea r3, [ang_table_avx2 + 17 * 32]
+    add r1d, r1d
+    lea r4, [r1 * 3]
+
+    call ang16_mode_7_29
+    RET
+
+cglobal intra_pred_ang16_8, 3,7,13
+    add r2, 64                          ; horizontal mode: use the left reference array
+    xor r6d, r6d                        ; r6d = 0 -> transposed store
+    lea r3, [ang_table_avx2 + 15 * 32]  ; [r3 + k * 32] = coefficients for fraction 15 + k
+    add r1d, r1d
+    lea r4, [r1 * 3]
+
+    call ang16_mode_8_28
+    RET
+
+cglobal intra_pred_ang16_28, 3,7,13
+    xor r6d, r6d
+    inc r6d                             ; r6d = 1 -> direct store (vertical mode)
+    lea r3, [ang_table_avx2 + 15 * 32]
+    add r1d, r1d
+    lea r4, [r1 * 3]
+
+    call ang16_mode_8_28
+    RET
+
;-------------------------------------------------------------------------------------------------------
; end of avx2 code for intra_pred_ang16 mode 2 to 34
;-------------------------------------------------------------------------------------------------------