[x265] [PATCH] asm: 10bpp avx2 code for intra_pred_ang32x32 mode 12 & 24
dnyaneshwar at multicorewareinc.com
Fri Jun 12 06:23:06 CEST 2015
# HG changeset patch
# User Dnyaneshwar G <dnyaneshwar at multicorewareinc.com>
# Date 1434019907 -19800
# Thu Jun 11 16:21:47 2015 +0530
# Node ID 98065d5141d80130899e88e31cd136e2ca7e47dd
# Parent 091940c84ef56d1373d8aa476e8d3f1618436567
asm: 10bpp avx2 code for intra_pred_ang32x32 mode 12 & 24
performance improvement over SSE:
intra_ang_32x32[12] 8084c->4584c, 43%
intra_ang_32x32[24] 5629c->2934c, 48%
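(The percentages are the cycle-count reduction relative to the SSE baseline,
i.e. (SSE - AVX2) / SSE: (8084 - 4584) / 8084 ~= 43%, (5629 - 2934) / 5629 ~= 48%.)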
diff -r 091940c84ef5 -r 98065d5141d8 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp Thu Jun 11 11:04:36 2015 +0530
+++ b/source/common/x86/asm-primitives.cpp Thu Jun 11 16:21:47 2015 +0530
@@ -1264,6 +1264,8 @@
p.cu[BLOCK_32x32].intra_pred[9] = x265_intra_pred_ang32_9_avx2;
p.cu[BLOCK_32x32].intra_pred[10] = x265_intra_pred_ang32_10_avx2;
p.cu[BLOCK_32x32].intra_pred[11] = x265_intra_pred_ang32_11_avx2;
+ p.cu[BLOCK_32x32].intra_pred[12] = x265_intra_pred_ang32_12_avx2;
+ p.cu[BLOCK_32x32].intra_pred[24] = x265_intra_pred_ang32_24_avx2;
p.cu[BLOCK_32x32].intra_pred[25] = x265_intra_pred_ang32_25_avx2;
p.cu[BLOCK_32x32].intra_pred[26] = x265_intra_pred_ang32_26_avx2;
p.cu[BLOCK_32x32].intra_pred[27] = x265_intra_pred_ang32_27_avx2;
diff -r 091940c84ef5 -r 98065d5141d8 source/common/x86/intrapred.h
--- a/source/common/x86/intrapred.h Thu Jun 11 11:04:36 2015 +0530
+++ b/source/common/x86/intrapred.h Thu Jun 11 16:21:47 2015 +0530
@@ -285,6 +285,7 @@
void x265_intra_pred_ang32_9_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
void x265_intra_pred_ang32_10_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
void x265_intra_pred_ang32_11_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
+void x265_intra_pred_ang32_12_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
void x265_intra_pred_ang32_26_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
void x265_intra_pred_ang32_27_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
void x265_intra_pred_ang32_28_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
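[Note: the declarations above use the common intra_pred primitive signature, and
the asm-primitives.cpp hunk wires the new entry points into the dispatch table.
A minimal, hypothetical sketch of a call site (buffer name and layout are
illustrative, not part of this patch):

    pixel dst[32 * 32];
    pixel refPix[4 * 32 + 1];   // corner + above row + left column, 10-bit samples
    // after filling refPix with the neighbouring reference samples:
    p.cu[BLOCK_32x32].intra_pred[12](dst, 32, refPix, 12, 0);   // mode 12
    p.cu[BLOCK_32x32].intra_pred[24](dst, 32, refPix, 24, 0);   // mode 24
]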
diff -r 091940c84ef5 -r 98065d5141d8 source/common/x86/intrapred16.asm
--- a/source/common/x86/intrapred16.asm Thu Jun 11 11:04:36 2015 +0530
+++ b/source/common/x86/intrapred16.asm Thu Jun 11 16:21:47 2015 +0530
@@ -48,6 +48,8 @@
const pw_ang16_15_21, db 12, 13, 12, 13, 8, 9, 8, 9, 4, 5, 4, 5, 0, 1, 0, 1
const pw_ang16_16_20, db 8, 9, 8, 9, 6, 7, 6, 7, 2, 3, 2, 3, 0, 1, 0, 1
+const pw_ang32_12_24, db 0, 1, 0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6, 7, 6, 7
+
const shuf_mode_13_23, db 0, 0, 14, 15, 6, 7, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0
const shuf_mode_14_22, db 14, 15, 10, 11, 4, 5, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0
const shuf_mode_15_21, db 12, 13, 8, 9, 4, 5, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0
@@ -14717,6 +14719,389 @@
mov r5d, [rsp]
mov [r2 - 32], r5w
RET
+
+;; angle 32, modes 12 and 24, rows 0 to 15
+cglobal ang32_mode_12_24_0_15
+ test r6d, r6d
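+    ; r6d selects the store path in TRANSPOSE_STORE_AVX2: 0 (mode 12, the
+    ; horizontal mode, transposed store) vs non-zero (mode 24, direct row
+    ; store); the vector ops below leave EFLAGS intact until the macro runs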
+
+ movu m0, [r2] ; [15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0]
+ movu m4, [r2 + 2] ; [16 15 14 13 12 11 10 9 8 7 6 5 4 3 2 1]
+
+ punpcklwd m3, m0, m4 ; [12 11 11 10 10 9 9 8 4 3 3 2 2 1 1 0]
+ punpckhwd m2, m0, m4 ; [16 15 15 14 14 13 13 12 8 7 7 6 6 5 5 4]
+
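+    ; each block below computes ((32 - frac) * ref[x] + frac * ref[x + 1] + 16) >> 5:
+    ; pmaddwd pairs the interleaved words with a (32 - frac, frac) row of
+    ; ang_table_avx2; r3 points at the frac = 16 row, so [r3 + k * 32] is
+    ; frac = 16 + k (the bracketed numbers in the comments)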
+ pmaddwd m4, m3, [r3 + 11 * 32] ; [27]
+ paddd m4, [pd_16]
+ psrld m4, 5
+ pmaddwd m5, m2, [r3 + 11 * 32]
+ paddd m5, [pd_16]
+ psrld m5, 5
+ packusdw m4, m5
+
+ pmaddwd m5, m3, [r3 + 6 * 32] ; [22]
+ paddd m5, [pd_16]
+ psrld m5, 5
+ pmaddwd m8, m2, [r3 + 6 * 32]
+ paddd m8, [pd_16]
+ psrld m8, 5
+ packusdw m5, m8
+
+ pmaddwd m6, m3, [r3 + 1 * 32] ; [17]
+ paddd m6, [pd_16]
+ psrld m6, 5
+ pmaddwd m9, m2, [r3 + 1 * 32]
+ paddd m9, [pd_16]
+ psrld m9, 5
+ packusdw m6, m9
+
+ pmaddwd m7, m3, [r3 - 4 * 32] ; [12]
+ paddd m7, [pd_16]
+ psrld m7, 5
+ pmaddwd m8, m2, [r3 - 4 * 32]
+ paddd m8, [pd_16]
+ psrld m8, 5
+ packusdw m7, m8
+
+ pmaddwd m8, m3, [r3 - 9 * 32] ; [7]
+ paddd m8, [pd_16]
+ psrld m8, 5
+ pmaddwd m9, m2, [r3 - 9 * 32]
+ paddd m9, [pd_16]
+ psrld m9, 5
+ packusdw m8, m9
+
+ pmaddwd m9, m3, [r3 - 14 * 32] ; [2]
+ paddd m9, [pd_16]
+ psrld m9, 5
+ pmaddwd m2, [r3 - 14 * 32]
+ paddd m2, [pd_16]
+ psrld m2, 5
+ packusdw m9, m2
+
+ movu xm1, [r2 - 8]
+ pshufb xm1, [pw_ang32_12_24]
+ punpcklwd m3, m0, m0 ; [11 11 10 10 9 9 8 8 3 3 2 2 1 1 0 0]
+ punpckhwd m0, m0 ; [15 15 14 14 13 13 12 12 7 7 6 6 5 5 4 4]
+ vinserti128 m1, m1, xm0, 1 ; [ 7 7 6 6 5 5 4 4 6 6 13 13 19 19 26 26]
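+    ; m1 low lane now holds the projected neighbour samples (each word
+    ; duplicated), m1 high lane words 4-7 of the main reference; the palignr
+    ; pairs below slide the interpolation window across m1/m3/m0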
+
+ palignr m2, m3, m1, 14 ; [11 10 10 9 9 8 8 7 3 2 2 1 1 0 0 6]
+ palignr m13, m0, m3, 14 ; [15 14 14 13 13 12 12 11 7 6 6 5 5 4 4 3]
+
+ pmaddwd m10, m2, [r3 + 13 * 32] ; [29]
+ paddd m10, [pd_16]
+ psrld m10, 5
+ pmaddwd m12, m13, [r3 + 13 * 32]
+ paddd m12, [pd_16]
+ psrld m12, 5
+ packusdw m10, m12
+
+ pmaddwd m11, m2, [r3 + 8 * 32] ; [24]
+ paddd m11, [pd_16]
+ psrld m11, 5
+ pmaddwd m13, [r3 + 8 * 32]
+ paddd m13, [pd_16]
+ psrld m13, 5
+ packusdw m11, m13
+
+ TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0
+
+ palignr m13, m0, m3, 14
+
+ pmaddwd m4, m2, [r3 + 3 * 32] ; [19]
+ paddd m4, [pd_16]
+ psrld m4, 5
+ pmaddwd m5, m13, [r3 + 3 * 32]
+ paddd m5, [pd_16]
+ psrld m5, 5
+ packusdw m4, m5
+
+ pmaddwd m5, m2, [r3 - 2 * 32] ; [14]
+ paddd m5, [pd_16]
+ psrld m5, 5
+ pmaddwd m6, m13, [r3 - 2 * 32]
+ paddd m6, [pd_16]
+ psrld m6, 5
+ packusdw m5, m6
+
+ pmaddwd m6, m2, [r3 - 7 * 32] ; [9]
+ paddd m6, [pd_16]
+ psrld m6, 5
+ pmaddwd m8, m13, [r3 - 7 * 32]
+ paddd m8, [pd_16]
+ psrld m8, 5
+ packusdw m6, m8
+
+ pmaddwd m7, m2, [r3 - 12 * 32] ; [4]
+ paddd m7, [pd_16]
+ psrld m7, 5
+ pmaddwd m8, m13, [r3 - 12 * 32]
+ paddd m8, [pd_16]
+ psrld m8, 5
+ packusdw m7, m8
+
+ palignr m0, m3, 10
+ palignr m3, m1, 10
+
+ pmaddwd m8, m3, [r3 + 15 * 32] ; [31]
+ paddd m8, [pd_16]
+ psrld m8, 5
+ pmaddwd m9, m0, [r3 + 15 * 32]
+ paddd m9, [pd_16]
+ psrld m9, 5
+ packusdw m8, m9
+
+ pmaddwd m9, m3, [r3 + 10 * 32] ; [26]
+ paddd m9, [pd_16]
+ psrld m9, 5
+ pmaddwd m10, m0, [r3 + 10 * 32]
+ paddd m10, [pd_16]
+ psrld m10, 5
+ packusdw m9, m10
+
+ pmaddwd m10, m3, [r3 + 5 * 32] ; [21]
+ paddd m10, [pd_16]
+ psrld m10, 5
+ pmaddwd m2, m0, [r3 + 5 * 32]
+ paddd m2, [pd_16]
+ psrld m2, 5
+ packusdw m10, m2
+
+ pmaddwd m3, [r3] ; [16]
+ paddd m3, [pd_16]
+ psrld m3, 5
+ pmaddwd m0, [r3]
+ paddd m0, [pd_16]
+ psrld m0, 5
+ packusdw m3, m0
+ TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 3, 0, 2, 16
+ ret
+
+;; angle 32, modes 12 and 24, rows 16 to 31
+cglobal ang32_mode_12_24_16_31
+ test r6d, r6d
+
+ movu m0, [r2] ; [15 14 13 12 11 10 9 8 7 6 5 4 3 2 1 0]
+
+ punpcklwd m3, m0, m0 ; [11 11 10 10 9 9 8 8 3 3 2 2 1 1 0 0]
+ punpckhwd m0, m0 ; [15 15 14 14 13 13 12 12 7 7 6 6 5 5 4 4]
+
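+    ; m1 (projected samples and low reference words) is preserved from the
+    ; preceding ang32_mode_12_24_0_15 call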
+ palignr m2, m3, m1, 10
+ palignr m13, m0, m3, 10
+
+ pmaddwd m4, m2, [r3 - 5 * 32] ; [11]
+ paddd m4, [pd_16]
+ psrld m4, 5
+ pmaddwd m5, m13, [r3 - 5 * 32]
+ paddd m5, [pd_16]
+ psrld m5, 5
+ packusdw m4, m5
+
+ pmaddwd m5, m2, [r3 - 10 * 32] ; [6]
+ paddd m5, [pd_16]
+ psrld m5, 5
+ pmaddwd m8, m13, [r3 - 10 * 32]
+ paddd m8, [pd_16]
+ psrld m8, 5
+ packusdw m5, m8
+
+ pmaddwd m6, m2, [r3 - 15 * 32] ; [1]
+ paddd m6, [pd_16]
+ psrld m6, 5
+ pmaddwd m9, m13, [r3 - 15 * 32]
+ paddd m9, [pd_16]
+ psrld m9, 5
+ packusdw m6, m9
+
+ palignr m2, m3, m1, 6
+ palignr m13, m0, m3, 6
+
+ pmaddwd m7, m2, [r3 + 12 * 32] ; [28]
+ paddd m7, [pd_16]
+ psrld m7, 5
+ pmaddwd m8, m13, [r3 + 12 * 32]
+ paddd m8, [pd_16]
+ psrld m8, 5
+ packusdw m7, m8
+
+ pmaddwd m8, m2, [r3 + 7 * 32] ; [23]
+ paddd m8, [pd_16]
+ psrld m8, 5
+ pmaddwd m9, m13, [r3 + 7 * 32]
+ paddd m9, [pd_16]
+ psrld m9, 5
+ packusdw m8, m9
+
+ pmaddwd m9, m2, [r3 + 2 * 32] ; [18]
+ paddd m9, [pd_16]
+ psrld m9, 5
+ pmaddwd m10, m13, [r3 + 2 * 32]
+ paddd m10, [pd_16]
+ psrld m10, 5
+ packusdw m9, m10
+
+ pmaddwd m10, m2, [r3 - 3 * 32] ; [13]
+ paddd m10, [pd_16]
+ psrld m10, 5
+ pmaddwd m12, m13, [r3 - 3 * 32]
+ paddd m12, [pd_16]
+ psrld m12, 5
+ packusdw m10, m12
+
+ pmaddwd m11, m2, [r3 - 8 * 32] ; [8]
+ paddd m11, [pd_16]
+ psrld m11, 5
+ pmaddwd m13, [r3 - 8 * 32]
+ paddd m13, [pd_16]
+ psrld m13, 5
+ packusdw m11, m13
+
+ TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 0
+
+ palignr m13, m0, m3, 6
+
+ pmaddwd m4, m2, [r3 - 13 * 32] ; [3]
+ paddd m4, [pd_16]
+ psrld m4, 5
+ pmaddwd m5, m13, [r3 - 13 * 32]
+ paddd m5, [pd_16]
+ psrld m5, 5
+ packusdw m4, m5
+
+ palignr m2, m3, m1, 2
+ palignr m13, m0, m3, 2
+
+ pmaddwd m5, m2, [r3 + 14 * 32] ; [30]
+ paddd m5, [pd_16]
+ psrld m5, 5
+ pmaddwd m6, m13, [r3 + 14 * 32]
+ paddd m6, [pd_16]
+ psrld m6, 5
+ packusdw m5, m6
+
+ pmaddwd m6, m2, [r3 + 9 * 32] ; [25]
+ paddd m6, [pd_16]
+ psrld m6, 5
+ pmaddwd m8, m13, [r3 + 9 * 32]
+ paddd m8, [pd_16]
+ psrld m8, 5
+ packusdw m6, m8
+
+ pmaddwd m7, m2, [r3 + 4 * 32] ; [20]
+ paddd m7, [pd_16]
+ psrld m7, 5
+ pmaddwd m8, m13, [r3 + 4 * 32]
+ paddd m8, [pd_16]
+ psrld m8, 5
+ packusdw m7, m8
+
+ pmaddwd m8, m2, [r3 - 1 * 32] ; [15]
+ paddd m8, [pd_16]
+ psrld m8, 5
+ pmaddwd m9, m13, [r3 - 1 * 32]
+ paddd m9, [pd_16]
+ psrld m9, 5
+ packusdw m8, m9
+
+ pmaddwd m9, m2, [r3 - 6 * 32] ; [10]
+ paddd m9, [pd_16]
+ psrld m9, 5
+ pmaddwd m10, m13, [r3 - 6 * 32]
+ paddd m10, [pd_16]
+ psrld m10, 5
+ packusdw m9, m10
+
+ pmaddwd m10, m2, [r3 - 11 * 32] ; [5]
+ paddd m10, [pd_16]
+ psrld m10, 5
+ pmaddwd m12, m13, [r3 - 11 * 32]
+ paddd m12, [pd_16]
+ psrld m12, 5
+ packusdw m10, m12
+
+ pmaddwd m2, [r3 - 16 * 32] ; [0]
+ paddd m2, [pd_16]
+ psrld m2, 5
+ pmaddwd m13, [r3 - 16 * 32]
+ paddd m13, [pd_16]
+ psrld m13, 5
+ packusdw m2, m13
+ TRANSPOSE_STORE_AVX2 4, 5, 6, 7, 8, 9, 10, 2, 0, 3, 16
+ ret
+
+cglobal intra_pred_ang32_12, 3,8,14, 0-16
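+    ; the projected samples built below overwrite [r2 + 114..129]; save those
+    ; bytes here and restore them before returning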
+ movu xm0, [r2 + 114]
+ mova [rsp], xm0
+
+ add r1d, r1d
+ lea r4, [r1 * 3]
+ lea r3, [ang_table_avx2 + 16 * 32]
+
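+    ; mode 12 is horizontal with angle -5 (invAngle 1638): extend the left
+    ; reference with the corner and above[(k * 1638 + 128) >> 8] = above[6],
+    ; above[13], above[19], above[26] for k = 1..4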
+ pinsrw xm1, [r2], 7 ; [0]
+ pinsrw xm1, [r2 + 12], 6 ; [6]
+ pinsrw xm1, [r2 + 26], 5 ; [13]
+ pinsrw xm1, [r2 + 38], 4 ; [19]
+ pinsrw xm1, [r2 + 52], 3 ; [26]
+ movu [r2 + 114], xm1
+
+ xor r6d, r6d
+ add r2, 128
+ lea r7, [r0 + 8 * r1]
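+    ; r2 + 128 is the corner sample, ref[0] of the extended left reference
+    ; built above; r7 marks dst + 8 rows, so rows 16-31 are reached later as
+    ; r7 + 8 * r1 (lea scale factors only go up to 8)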
+
+ call ang32_mode_12_24_0_15
+
+ lea r0, [r0 + 32]
+
+ call ang32_mode_12_24_16_31
+
+ add r2, 32
+ lea r0, [r7 + 8 * r1]
+
+ call ang32_mode_12_24_0_15
+
+ lea r0, [r0 + 32]
+
+ call ang32_mode_12_24_16_31
+
+ mova xm0, [rsp]
+ movu [r2 - 46], xm0
+ RET
+
+cglobal intra_pred_ang32_24, 3,7,14, 0-16
+ movu xm0, [r2 - 16]
+ mova [rsp], xm0
+
+ add r1d, r1d
+ lea r4, [r1 * 3]
+ lea r3, [ang_table_avx2 + 16 * 32]
+
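+    ; mode 24 is vertical with the mirrored angle -5: extend the top reference
+    ; with left[(k * 1638 + 128) >> 8] = left[6], left[13], left[19], left[26],
+    ; stored just below srcPix[0]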
+ pinsrw xm1, [r2 + 140], 7 ; [6]
+ pinsrw xm1, [r2 + 154], 6 ; [13]
+ pinsrw xm1, [r2 + 166], 5 ; [19]
+ pinsrw xm1, [r2 + 180], 4 ; [26]
+ movu [r2 - 16], xm1
+
+ xor r6d, r6d
+ inc r6d
+ lea r5, [r0 + 32]
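+    ; r6d = 1: mode 24 stores rows directly (no transpose); r5 saves dst of
+    ; the right 16-column half (32 bytes = 16 pixels)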
+
+ call ang32_mode_12_24_0_15
+
+ call ang32_mode_12_24_16_31
+
+ add r2, 32
+ mov r0, r5
+
+ call ang32_mode_12_24_0_15
+
+ call ang32_mode_12_24_16_31
+
+ mova xm0, [rsp]
+ movu [r2 - 48], xm0
+ RET
;-------------------------------------------------------------------------------------------------------
; end of avx2 code for intra_pred_ang32 mode 2 to 34
;-------------------------------------------------------------------------------------------------------