[x265] [PATCH] asm: 10bpp avx2 code for intra_pred_ang32x32 mode 15 & 21
dnyaneshwar at multicorewareinc.com
Tue Jun 16 11:10:50 CEST 2015
# HG changeset patch
# User Dnyaneshwar G <dnyaneshwar at multicorewareinc.com>
# Date 1434344695 -19800
# Mon Jun 15 10:34:55 2015 +0530
# Node ID 9796fe79ac615fce0ed9e8b503ca5069525df625
# Parent 534ba74791442449ff7d66d5c0bd3ad208af7be4
asm: 10bpp avx2 code for intra_pred_ang32x32 mode 15 & 21
performance improvement over SSE (cycle counts):
intra_ang_32x32[15]  8337c -> 4609c, 44%
intra_ang_32x32[21]  6303c -> 3238c, 48%
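
Both modes use intraPredAngle -17 (mode 15 is the horizontal variant, mode 21
the vertical one). Each pmaddwd in the asm below multiplies word pairs by a
(32 - frac, frac) coefficient pair from ang_table_avx2, and the following
paddd/psrld apply the +16 rounding and >>5 shift of the standard HEVC
interpolation. A minimal scalar sketch of that computation (illustrative
names, not the x265 reference code; 'ref' is assumed already extended to
negative indices with the inverse-angle-projected samples, as the asm
prologues set up; for horizontal mode 15, x and y swap roles, which the
transposed store in the asm accounts for):

    #include <cstdint>

    // Scalar model of what each pmaddwd/paddd/psrld/packusdw group computes.
    static void ang32_sketch(uint16_t* dst, intptr_t stride,
                             const uint16_t* ref, int angle /* -17 here */)
    {
        for (int y = 0; y < 32; y++)
        {
            int pos  = (y + 1) * angle;
            int idx  = pos >> 5;   // integer step along the reference
            int frac = pos & 31;   // the bracketed constant in the asm
            for (int x = 0; x < 32; x++)
                dst[y * stride + x] =
                    (uint16_t)(((32 - frac) * ref[x + idx + 1] +
                                frac * ref[x + idx + 2] + 16) >> 5);
        }
    }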
diff -r 534ba7479144 -r 9796fe79ac61 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp Mon Jun 15 10:03:37 2015 +0530
+++ b/source/common/x86/asm-primitives.cpp Mon Jun 15 10:34:55 2015 +0530
@@ -1267,6 +1267,8 @@
p.cu[BLOCK_32x32].intra_pred[12] = x265_intra_pred_ang32_12_avx2;
p.cu[BLOCK_32x32].intra_pred[13] = x265_intra_pred_ang32_13_avx2;
p.cu[BLOCK_32x32].intra_pred[14] = x265_intra_pred_ang32_14_avx2;
+ p.cu[BLOCK_32x32].intra_pred[15] = x265_intra_pred_ang32_15_avx2;
+ p.cu[BLOCK_32x32].intra_pred[21] = x265_intra_pred_ang32_21_avx2;
p.cu[BLOCK_32x32].intra_pred[22] = x265_intra_pred_ang32_22_avx2;
p.cu[BLOCK_32x32].intra_pred[23] = x265_intra_pred_ang32_23_avx2;
p.cu[BLOCK_32x32].intra_pred[24] = x265_intra_pred_ang32_24_avx2;
diff -r 534ba7479144 -r 9796fe79ac61 source/common/x86/intrapred.h
--- a/source/common/x86/intrapred.h Mon Jun 15 10:03:37 2015 +0530
+++ b/source/common/x86/intrapred.h Mon Jun 15 10:34:55 2015 +0530
@@ -287,6 +287,8 @@
void x265_intra_pred_ang32_11_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
void x265_intra_pred_ang32_12_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
void x265_intra_pred_ang32_14_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
+void x265_intra_pred_ang32_15_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
+void x265_intra_pred_ang32_21_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
void x265_intra_pred_ang32_13_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
void x265_intra_pred_ang32_26_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
void x265_intra_pred_ang32_27_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
diff -r 534ba7479144 -r 9796fe79ac61 source/common/x86/intrapred16.asm
--- a/source/common/x86/intrapred16.asm Mon Jun 15 10:03:37 2015 +0530
+++ b/source/common/x86/intrapred16.asm Mon Jun 15 10:34:55 2015 +0530
@@ -51,6 +51,7 @@
const pw_ang32_12_24, db 0, 1, 0, 1, 2, 3, 2, 3, 4, 5, 4, 5, 6, 7, 6, 7
const pw_ang32_13_23, db 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 14, 15, 6, 7, 0, 1
const pw_ang32_14_22, db 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 11, 6, 7, 0, 1
+const pw_ang32_15_21, db 0, 0, 0, 0, 0, 0, 0, 0, 12, 13, 8, 9, 4, 5, 0, 1
const shuf_mode_13_23, db 0, 0, 14, 15, 6, 7, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0
const shuf_mode_14_22, db 14, 15, 10, 11, 4, 5, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0
@@ -16026,6 +16027,439 @@
mov rsp, [rsp+4*mmsize]
RET
+
+;; angle 32, modes 15 and 21, rows 0 to 15
+cglobal ang32_mode_15_21_rows_0_15
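+    ; inputs: r2 = reference pointer (main reference at r2, projected samples
+    ; below it), r3 = ang_table_avx2 + 16 * 32, r0/r1/r4 = dst/stride/3*stride
+    ; r6d selects the store path: 0 = transposed store (mode 15, horizontal),
+    ; non-zero = direct store (mode 21, vertical); the test below sets the
+    ; flags that TRANSPOSE_STORE_AVX2_STACK branches on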
+ test r6d, r6d
+
+ movu m0, [r2 - 16]
+ movu m1, [r2 - 14]
+
+ punpcklwd m3, m0, m1
+ punpckhwd m0, m1
+
+ movu m1, [r2]
+ movu m4, [r2 + 2]
+ punpcklwd m2, m1, m4
+ punpckhwd m1, m4
+
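+    ; each pmaddwd/paddd/psrld/packusdw group below computes
+    ; ((32 - frac) * a + frac * b + 16) >> 5 per lane; the bracketed
+    ; constant on the first pmaddwd of a group is frac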
+ pmaddwd m4, m3, [r3] ; [16]
+ paddd m4, [pd_16]
+ psrld m4, 5
+ pmaddwd m5, m0, [r3]
+ paddd m5, [pd_16]
+ psrld m5, 5
+ packusdw m4, m5
+
+ palignr m6, m0, m3, 4
+ palignr m7, m2, m0, 4
+ pmaddwd m5, m6, [r3 - 15 * 32] ; [1]
+ paddd m5, [pd_16]
+ psrld m5, 5
+ pmaddwd m8, m7, [r3 - 15 * 32]
+ paddd m8, [pd_16]
+ psrld m8, 5
+ packusdw m5, m8
+
+ pmaddwd m6, [r3 + 2 * 32] ; [18]
+ paddd m6, [pd_16]
+ psrld m6, 5
+ pmaddwd m7, [r3 + 2 * 32]
+ paddd m7, [pd_16]
+ psrld m7, 5
+ packusdw m6, m7
+
+ palignr m8, m0, m3, 8
+ palignr m9, m2, m0, 8
+ pmaddwd m7, m8, [r3 - 13 * 32] ; [3]
+ paddd m7, [pd_16]
+ psrld m7, 5
+ pmaddwd m10, m9, [r3 - 13 * 32]
+ paddd m10, [pd_16]
+ psrld m10, 5
+ packusdw m7, m10
+
+ pmaddwd m8, [r3 + 4 * 32] ; [20]
+ paddd m8, [pd_16]
+ psrld m8, 5
+ pmaddwd m9, [r3 + 4 * 32]
+ paddd m9, [pd_16]
+ psrld m9, 5
+ packusdw m8, m9
+
+ palignr m10, m0, m3, 12
+ palignr m11, m2, m0, 12
+ pmaddwd m9, m10, [r3 - 11 * 32] ; [5]
+ paddd m9, [pd_16]
+ psrld m9, 5
+ pmaddwd m12, m11, [r3 - 11 * 32]
+ paddd m12, [pd_16]
+ psrld m12, 5
+ packusdw m9, m12
+
+ pmaddwd m10, [r3 + 6 * 32] ; [22]
+ paddd m10, [pd_16]
+ psrld m10, 5
+ pmaddwd m11, [r3 + 6 * 32]
+ paddd m11, [pd_16]
+ psrld m11, 5
+ packusdw m10, m11
+
+ pmaddwd m11, m0, [r3 - 9 * 32] ; [7]
+ paddd m11, [pd_16]
+ psrld m11, 5
+ pmaddwd m12, m2, [r3 - 9 * 32]
+ paddd m12, [pd_16]
+ psrld m12, 5
+ packusdw m11, m12
+
+ TRANSPOSE_STORE_AVX2_STACK 11, 10, 9, 8, 7, 6, 5, 4, 12, 13, 16
+
+ pmaddwd m4, m0, [r3 + 8 * 32] ; [24]
+ paddd m4, [pd_16]
+ psrld m4, 5
+ pmaddwd m5, m2, [r3 + 8 * 32]
+ paddd m5, [pd_16]
+ psrld m5, 5
+ packusdw m4, m5
+
+ palignr m6, m2, m0, 4
+ palignr m7, m1, m2, 4
+ pmaddwd m5, m6, [r3 - 7 * 32] ; [9]
+ paddd m5, [pd_16]
+ psrld m5, 5
+ pmaddwd m3, m7, [r3 - 7 * 32]
+ paddd m3, [pd_16]
+ psrld m3, 5
+ packusdw m5, m3
+
+ pmaddwd m6, [r3 + 10 * 32] ; [26]
+ paddd m6, [pd_16]
+ psrld m6, 5
+ pmaddwd m7, [r3 + 10 * 32]
+ paddd m7, [pd_16]
+ psrld m7, 5
+ packusdw m6, m7
+
+ palignr m8, m2, m0, 8
+ palignr m9, m1, m2, 8
+ pmaddwd m7, m8, [r3 - 5 * 32] ; [11]
+ paddd m7, [pd_16]
+ psrld m7, 5
+ pmaddwd m3, m9, [r3 - 5 * 32]
+ paddd m3, [pd_16]
+ psrld m3, 5
+ packusdw m7, m3
+
+ pmaddwd m8, [r3 + 12 * 32] ; [28]
+ paddd m8, [pd_16]
+ psrld m8, 5
+ pmaddwd m9, [r3 + 12 * 32]
+ paddd m9, [pd_16]
+ psrld m9, 5
+ packusdw m8, m9
+
+ palignr m10, m2, m0, 12
+ palignr m11, m1, m2, 12
+ pmaddwd m9, m10, [r3 - 3 * 32] ; [13]
+ paddd m9, [pd_16]
+ psrld m9, 5
+ pmaddwd m3, m11, [r3 - 3 * 32]
+ paddd m3, [pd_16]
+ psrld m3, 5
+ packusdw m9, m3
+
+ pmaddwd m10, [r3 + 14 * 32] ; [30]
+ paddd m10, [pd_16]
+ psrld m10, 5
+ pmaddwd m11, [r3 + 14 * 32]
+ paddd m11, [pd_16]
+ psrld m11, 5
+ packusdw m10, m11
+
+ pmaddwd m2, [r3 - 1 * 32] ; [15]
+ paddd m2, [pd_16]
+ psrld m2, 5
+ pmaddwd m1, [r3 - 1 * 32]
+ paddd m1, [pd_16]
+ psrld m1, 5
+ packusdw m2, m1
+ TRANSPOSE_STORE_AVX2_STACK 2, 10, 9, 8, 7, 6, 5, 4, 0, 1, 0
+ ret
+
+;; angle 32, modes 15 and 21, rows 16 to 31
+cglobal ang32_mode_15_21_rows_16_31
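+    ; same contract as ang32_mode_15_21_rows_0_15, but for rows 16 to 31:
+    ; the loads start 8 samples (16 bytes) earlier because the negative
+    ; angle has walked further along the reference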
+ test r6d, r6d
+
+ movu m0, [r2 - 32]
+ movu m1, [r2 - 30]
+
+ punpcklwd m3, m0, m1
+ punpckhwd m0, m1
+
+ movu m1, [r2 - 16]
+ movu m4, [r2 - 14]
+ punpcklwd m2, m1, m4
+ punpckhwd m1, m4
+
+ pmaddwd m4, m3, [r3 - 16 * 32] ; [0]
+ paddd m4, [pd_16]
+ psrld m4, 5
+ pmaddwd m5, m0, [r3 - 16 * 32]
+ paddd m5, [pd_16]
+ psrld m5, 5
+ packusdw m4, m5
+
+ pmaddwd m5, m3, [r3 + 1 * 32] ; [17]
+ paddd m5, [pd_16]
+ psrld m5, 5
+ pmaddwd m8, m0, [r3 + 1 * 32]
+ paddd m8, [pd_16]
+ psrld m8, 5
+ packusdw m5, m8
+
+ palignr m7, m0, m3, 4
+ palignr m8, m2, m0, 4
+ pmaddwd m6, m7, [r3 - 14 * 32] ; [2]
+ paddd m6, [pd_16]
+ psrld m6, 5
+ pmaddwd m9, m8, [r3 - 14 * 32]
+ paddd m9, [pd_16]
+ psrld m9, 5
+ packusdw m6, m9
+
+ pmaddwd m7, [r3 + 3 * 32] ; [19]
+ paddd m7, [pd_16]
+ psrld m7, 5
+ pmaddwd m8, [r3 + 3 * 32]
+ paddd m8, [pd_16]
+ psrld m8, 5
+ packusdw m7, m8
+
+ palignr m9, m0, m3, 8
+ palignr m10, m2, m0, 8
+ pmaddwd m8, m9, [r3 - 12 * 32] ; [4]
+ paddd m8, [pd_16]
+ psrld m8, 5
+ pmaddwd m11, m10, [r3 - 12 * 32]
+ paddd m11, [pd_16]
+ psrld m11, 5
+ packusdw m8, m11
+
+ pmaddwd m9, [r3 + 5 * 32] ; [21]
+ paddd m9, [pd_16]
+ psrld m9, 5
+ pmaddwd m10, [r3 + 5 * 32]
+ paddd m10, [pd_16]
+ psrld m10, 5
+ packusdw m9, m10
+
+ palignr m11, m0, m3, 12
+ palignr m12, m2, m0, 12
+ pmaddwd m10, m11, [r3 - 10 * 32] ; [6]
+ paddd m10, [pd_16]
+ psrld m10, 5
+ pmaddwd m13, m12, [r3 - 10 * 32]
+ paddd m13, [pd_16]
+ psrld m13, 5
+ packusdw m10, m13
+
+ pmaddwd m11, [r3 + 7 * 32] ; [23]
+ paddd m11, [pd_16]
+ psrld m11, 5
+ pmaddwd m12, [r3 + 7 * 32]
+ paddd m12, [pd_16]
+ psrld m12, 5
+ packusdw m11, m12
+
+ TRANSPOSE_STORE_AVX2_STACK 11, 10, 9, 8, 7, 6, 5, 4, 12, 13, 16
+
+ pmaddwd m4, m0, [r3 - 8 * 32] ; [8]
+ paddd m4, [pd_16]
+ psrld m4, 5
+ pmaddwd m7, m2, [r3 - 8 * 32]
+ paddd m7, [pd_16]
+ psrld m7, 5
+ packusdw m4, m7
+
+ pmaddwd m5, m0, [r3 + 9 * 32] ; [25]
+ paddd m5, [pd_16]
+ psrld m5, 5
+ pmaddwd m6, m2, [r3 + 9 * 32]
+ paddd m6, [pd_16]
+ psrld m6, 5
+ packusdw m5, m6
+
+ palignr m7, m2, m0, 4
+ palignr m8, m1, m2, 4
+ pmaddwd m6, m7, [r3 - 6 * 32] ; [10]
+ paddd m6, [pd_16]
+ psrld m6, 5
+ pmaddwd m3, m8, [r3 - 6 * 32]
+ paddd m3, [pd_16]
+ psrld m3, 5
+ packusdw m6, m3
+
+ pmaddwd m7, [r3 + 11 * 32] ; [27]
+ paddd m7, [pd_16]
+ psrld m7, 5
+ pmaddwd m8, [r3 + 11 * 32]
+ paddd m8, [pd_16]
+ psrld m8, 5
+ packusdw m7, m8
+
+ palignr m9, m2, m0, 8
+ palignr m3, m1, m2, 8
+ pmaddwd m8, m9, [r3 - 4 * 32] ; [12]
+ paddd m8, [pd_16]
+ psrld m8, 5
+ pmaddwd m11, m3, [r3 - 4 * 32]
+ paddd m11, [pd_16]
+ psrld m11, 5
+ packusdw m8, m11
+
+ pmaddwd m9, [r3 + 13 * 32] ; [29]
+ paddd m9, [pd_16]
+ psrld m9, 5
+ pmaddwd m3, [r3 + 13 * 32]
+ paddd m3, [pd_16]
+ psrld m3, 5
+ packusdw m9, m3
+
+ palignr m1, m2, 12
+ palignr m2, m0, 12
+ pmaddwd m10, m2, [r3 - 2 * 32] ; [14]
+ paddd m10, [pd_16]
+ psrld m10, 5
+ pmaddwd m11, m1, [r3 - 2 * 32]
+ paddd m11, [pd_16]
+ psrld m11, 5
+ packusdw m10, m11
+
+ pmaddwd m2, [r3 + 15 * 32] ; [31]
+ paddd m2, [pd_16]
+ psrld m2, 5
+ pmaddwd m1, [r3 + 15 * 32]
+ paddd m1, [pd_16]
+ psrld m1, 5
+ packusdw m2, m1
+ TRANSPOSE_STORE_AVX2_STACK 2, 10, 9, 8, 7, 6, 5, 4, 0, 1, 0
+ ret
+
+cglobal intra_pred_ang32_15, 3,8,14
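+    ; reserve a 64-byte-aligned scratch buffer for the reworked reference;
+    ; the caller's rsp is saved at rsp + 4*mmsize and restored before RET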
+ mov r6, rsp
+ sub rsp, 4*mmsize+gprsize
+ and rsp, ~63
+ mov [rsp+4*mmsize], r6
+
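+    ; copy the left-neighbour samples (srcPix + 128 bytes onward) into the
+    ; scratch reference; the first word is patched with top-left below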
+ movu m0, [r2 + 128]
+ movu m1, [r2 + 160]
+ movd xm2, [r2 + 192]
+
+ mova [rsp + 1*mmsize], m0
+ mova [rsp + 2*mmsize], m1
+ movd [rsp + 3*mmsize], xm2
+
+ add r1d, r1d
+ lea r4, [r1 * 3]
+ lea r3, [ang_table_avx2 + 16 * 32]
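+    ; r3 points at the fraction-16 entry of ang_table_avx2, so
+    ; [r3 + k * 32] holds the (32 - frac, frac) pair for frac = 16 + k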
+
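+    ; gather the above-row samples selected by the inverse angle
+    ; (invAngle = 482 for angle -17) and store them in reversed order just
+    ; below the left-column copy, extending the reference to negative indices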
+ movu xm1, [r2 + 4]
+ movu xm2, [r2 + 18]
+ movu xm3, [r2 + 34]
+ movu xm4, [r2 + 48]
+ pshufb xm1, [pw_ang32_15_21]
+ pshufb xm2, [pw_ang32_15_21]
+ pshufb xm3, [pw_ang32_15_21]
+ pshufb xm4, [pw_ang32_15_21]
+
+ punpckhqdq xm2, xm1
+ punpckhqdq xm4, xm3
+
+ movzx r6d, word [r2]
+ mov [rsp + 1*mmsize], r6w
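+    ; overwrite the first scratch entry with the top-left sample so the
+    ; main reference starts {topLeft, left[0], left[1], ...}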
+ movu [rsp + 16], xm2
+ movu [rsp], xm4
+
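+    ; r6d = 0: rows are stored transposed (mode 15 is a horizontal mode)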
+ xor r6d, r6d
+ lea r2, [rsp + 1*mmsize]
+ lea r7, [r0 + 8 * r1]
+
+ call ang32_mode_15_21_rows_0_15
+
+ lea r0, [r0 + 32]
+
+ call ang32_mode_15_21_rows_16_31
+
+ add r2, 32
+ lea r0, [r7 + 8 * r1]
+
+ call ang32_mode_15_21_rows_0_15
+
+ lea r0, [r0 + 32]
+
+ call ang32_mode_15_21_rows_16_31
+
+ mov rsp, [rsp+4*mmsize]
+ RET
+
+cglobal intra_pred_ang32_21, 3,8,14
+ mov r6, rsp
+ sub rsp, 4*mmsize+gprsize
+ and rsp, ~63
+ mov [rsp+4*mmsize], r6
+
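+    ; copy top-left plus the above-row samples into the scratch reference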
+ movu m0, [r2]
+ movu m1, [r2 + 32]
+ movd xm2, [r2 + 64]
+
+ mova [rsp + 1*mmsize], m0
+ mova [rsp + 2*mmsize], m1
+ movd [rsp + 3*mmsize], xm2
+
+ add r1d, r1d
+ lea r4, [r1 * 3]
+ lea r3, [ang_table_avx2 + 16 * 32]
+
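+    ; mode 21 predicts from the above row, so the inverse-angle-projected
+    ; samples come from the left column instead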
+ movu xm1, [r2 + 132]
+ movu xm2, [r2 + 146]
+ movu xm3, [r2 + 162]
+ movu xm4, [r2 + 176]
+ pshufb xm1, [pw_ang32_15_21]
+ pshufb xm2, [pw_ang32_15_21]
+ pshufb xm3, [pw_ang32_15_21]
+ pshufb xm4, [pw_ang32_15_21]
+
+ punpckhqdq xm2, xm1
+ punpckhqdq xm4, xm3
+
+ movu [rsp + 16], xm2
+ movu [rsp], xm4
+
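+    ; r6d = 1: rows are stored directly (mode 21 is a vertical mode)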
+ xor r6d, r6d
+ inc r6d
+ lea r2, [rsp + 1*mmsize]
+ lea r5, [r0 + 32]
+
+ call ang32_mode_15_21_rows_0_15
+
+ lea r0, [r0 + 8 * r1]
+ lea r0, [r0 + 8 * r1]
+
+ call ang32_mode_15_21_rows_16_31
+
+ add r2, 32
+ mov r0, r5
+
+ call ang32_mode_15_21_rows_0_15
+
+ lea r0, [r0 + 8 * r1]
+ lea r0, [r0 + 8 * r1]
+
+ call ang32_mode_15_21_rows_16_31
+
+ mov rsp, [rsp+4*mmsize]
+ RET
;-------------------------------------------------------------------------------------------------------
; end of avx2 code for intra_pred_ang32 mode 2 to 34
;-------------------------------------------------------------------------------------------------------
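
Design note: since modes 15 and 21 share angle -17, one mode's 32x32 block is
the transpose of the other's; that is why both entry points call the same
ang32_mode_15_21 helpers and differ only in the r6d flag consumed by
TRANSPOSE_STORE_AVX2_STACK. In scalar terms (hypothetical pred15/pred21
buffers):

    // One mode's output is the other's transpose; the asm avoids an
    // explicit transpose pass by selecting the store pattern instead.
    for (int y = 0; y < 32; y++)
        for (int x = 0; x < 32; x++)
            pred21[y * stride + x] = pred15[x * stride + y];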