[x265] [PATCH 4 of 4] asm: intra_pred_ang16_3 improved by ~25% over SSE4

praveen at multicorewareinc.com
Wed Apr 8 13:30:09 CEST 2015


# HG changeset patch
# User Praveen Tiwari <praveen at multicorewareinc.com>
# Date 1428488823 -19800
#      Wed Apr 08 15:57:03 2015 +0530
# Node ID 811d84129896919403889585f38fc4189bc59b0a
# Parent  77dbb7d0c8a69b6f0f5977282adb7acb106f7442
asm: intra_pred_ang16_3 improved by ~25% over SSE4

(columns below, as printed by the x265 testbench: speedup over the C
reference, optimized primitive cycles, C reference cycles)

AVX2:
intra_ang_16x16[ 3]     11.83x   1005.19         11894.02

SSE4:
intra_ang_16x16[ 3]     8.50x    1346.42         11439.88

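For reference, here is a minimal scalar sketch of what this primitive
computes: HEVC angular prediction for mode 3, a horizontal mode with
intraPredAngle = 26, where each sample is a two-tap interpolation of two
adjacent left-neighbour pixels. The names (intraPredAng16Mode3_ref,
refLeft) are illustrative, not x265's actual reference primitive;
refLeft[0] stands for the first left neighbour, the byte the asm loads
at [r2 + 1 + 32]. Because the mode is horizontal, the asm works along
the reference and transposes at the end (INTRA_PRED_TRANS_STORE_16x16),
whereas this sketch writes dst directly.

#include <cstdint>
#include <cstddef>

typedef uint8_t pixel;

static void intraPredAng16Mode3_ref(pixel* dst, intptr_t dstStride,
                                    const pixel* refLeft)
{
    const int angle = 26;            // intraPredAngle for mode 3
    for (int x = 0; x < 16; x++)     // horizontal mode: columns drive the angle
    {
        int pos  = (x + 1) * angle;
        int idx  = pos >> 5;         // integer offset into the left reference
        int fact = pos & 31;         // fractional position, 0..31
        for (int y = 0; y < 16; y++)
        {
            // two-tap linear interpolation between adjacent references,
            // with the spec rounding (+16) >> 5
            int val = ((32 - fact) * refLeft[y + idx] +
                       fact        * refLeft[y + idx + 1] + 16) >> 5;
            dst[y * dstStride + x] = (pixel)val;
        }
    }
}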
diff -r 77dbb7d0c8a6 -r 811d84129896 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Wed Apr 08 15:15:01 2015 +0530
+++ b/source/common/x86/asm-primitives.cpp	Wed Apr 08 15:57:03 2015 +0530
@@ -1761,6 +1761,7 @@
         p.cu[BLOCK_8x8].intra_pred[12] = x265_intra_pred_ang8_12_avx2;
         p.cu[BLOCK_8x8].intra_pred[24] = x265_intra_pred_ang8_24_avx2;
         p.cu[BLOCK_8x8].intra_pred[11] = x265_intra_pred_ang8_11_avx2;
+        p.cu[BLOCK_16x16].intra_pred[3] = x265_intra_pred_ang16_3_avx2;
         p.cu[BLOCK_16x16].intra_pred[4] = x265_intra_pred_ang16_4_avx2;
         p.cu[BLOCK_16x16].intra_pred[5] = x265_intra_pred_ang16_5_avx2;
         p.cu[BLOCK_16x16].intra_pred[6] = x265_intra_pred_ang16_6_avx2;
diff -r 77dbb7d0c8a6 -r 811d84129896 source/common/x86/intrapred.h
--- a/source/common/x86/intrapred.h	Wed Apr 08 15:15:01 2015 +0530
+++ b/source/common/x86/intrapred.h	Wed Apr 08 15:57:03 2015 +0530
@@ -233,6 +233,7 @@
 void x265_intra_pred_ang8_12_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
 void x265_intra_pred_ang8_24_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
 void x265_intra_pred_ang8_11_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
+void x265_intra_pred_ang16_3_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
 void x265_intra_pred_ang16_4_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
 void x265_intra_pred_ang16_5_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
 void x265_intra_pred_ang16_6_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
diff -r 77dbb7d0c8a6 -r 811d84129896 source/common/x86/intrapred8.asm
--- a/source/common/x86/intrapred8.asm	Wed Apr 08 15:15:01 2015 +0530
+++ b/source/common/x86/intrapred8.asm	Wed Apr 08 15:57:03 2015 +0530
@@ -282,6 +282,16 @@
                      db 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6
                      db 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0
 
+ALIGN 32
+c_ang16_mode_3:      db 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10
+                     db 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4
+                     db 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30
+                     db 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24
+                     db 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18
+                     db 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12
+                     db 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6
+                     db 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0
+
 ALIGN 32
 c_ang16_mode_24:     db 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22
                      db 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12
@@ -12087,6 +12097,132 @@
     INTRA_PRED_TRANS_STORE_16x16
     RET
 
+
+INIT_YMM avx2
+cglobal intra_pred_ang16_3, 3, 6, 12
+    mova              m11, [pw_1024]
+    lea               r5, [intra_pred_shuff_0_8]
+
+    movu              xm9, [r2 + 1 + 32]
+    pshufb            xm9, [r5]
+    movu              xm10, [r2 + 9 + 32]
+    pshufb            xm10, [r5]
+
+    movu              xm7, [r2 + 8 + 32]
+    pshufb            xm7, [r5]
+    vinserti128       m9, m9, xm7, 1
+
+    movu              xm8, [r2 + 16 + 32]
+    pshufb            xm8, [r5]
+    vinserti128       m10, m10, xm8, 1
+
+    lea               r3, [3 * r1]
+    lea               r4, [c_ang16_mode_3]
+
+    INTRA_PRED_ANG16_CAL_ROW m0, m1, 0
+
+    movu              xm9, [r2 + 2 + 32]
+    pshufb            xm9, [r5]
+    movu              xm10, [r2 + 10 + 32]
+    pshufb            xm10, [r5]
+
+    movu              xm7, [r2 + 9 + 32]
+    pshufb            xm7, [r5]
+    vinserti128       m9, m9, xm7, 1
+
+    movu              xm8, [r2 + 17 + 32]
+    pshufb            xm8, [r5]
+    vinserti128       m10, m10, xm8, 1
+
+    INTRA_PRED_ANG16_CAL_ROW m1, m2, 1
+
+    movu              xm7, [r2 + 3 + 32]
+    pshufb            xm7, [r5]
+    vinserti128       m9, m9, xm7, 0
+
+    movu              xm8, [r2 + 11 + 32]
+    pshufb            xm8, [r5]
+    vinserti128       m10, m10, xm8, 0
+
+    INTRA_PRED_ANG16_CAL_ROW m2, m3, 2
+
+    movu              xm9, [r2 + 4 + 32]
+    pshufb            xm9, [r5]
+    movu              xm10, [r2 + 12 + 32]
+    pshufb            xm10, [r5]
+
+    movu              xm7, [r2 + 10 + 32]
+    pshufb            xm7, [r5]
+    vinserti128       m9, m9, xm7, 1
+
+    movu              xm8, [r2 + 18 + 32]
+    pshufb            xm8, [r5]
+    vinserti128       m10, m10, xm8, 1
+
+    INTRA_PRED_ANG16_CAL_ROW m3, m4, 3
+
+    movu              xm9, [r2 + 5 + 32]
+    pshufb            xm9, [r5]
+    movu              xm10, [r2 + 13 + 32]
+    pshufb            xm10, [r5]
+
+    movu              xm7, [r2 + 11 + 32]
+    pshufb            xm7, [r5]
+    vinserti128       m9, m9, xm7, 1
+
+    movu              xm8, [r2 + 19 + 32]
+    pshufb            xm8, [r5]
+    vinserti128       m10, m10, xm8, 1
+
+    add               r4, 4 * mmsize
+
+    INTRA_PRED_ANG16_CAL_ROW m4, m5, 0
+
+    movu              xm7, [r2 + 12 + 32]
+    pshufb            xm7, [r5]
+    vinserti128       m9, m9, xm7, 1
+
+    movu              xm8, [r2 + 20 + 32]
+    pshufb            xm8, [r5]
+    vinserti128       m10, m10, xm8, 1
+
+    INTRA_PRED_ANG16_CAL_ROW m5, m6, 1
+
+    movu              xm9, [r2 + 6 + 32]
+    pshufb            xm9, [r5]
+    movu              xm10, [r2 + 14 + 32]
+    pshufb            xm10, [r5]
+
+    movu              xm7, [r2 + 13 + 32]
+    pshufb            xm7, [r5]
+    vinserti128       m9, m9, xm7, 1
+
+    movu              xm8, [r2 + 21 + 32]
+    pshufb            xm8, [r5]
+    vinserti128       m10, m10, xm8, 1
+
+    INTRA_PRED_ANG16_CAL_ROW m6, m7, 2
+
+    movu              xm9, [r2 + 7 + 32]
+    pshufb            xm9, [r5]
+    movu              xm10, [r2 + 15 + 32]
+    pshufb            xm10, [r5]
+
+    movu              xm7, [r2 + 14 + 32]
+    pshufb            xm7, [r5]
+    vinserti128       m9, m9, xm7, 1
+
+    movu              xm8, [r2 + 22 + 32]
+    pshufb            xm8, [r5]
+    vinserti128       m10, m10, xm8, 1
+
+    INTRA_PRED_ANG16_CAL_ROW m7, m8, 3
+
+    ; transpose and store
+    INTRA_PRED_TRANS_STORE_16x16
+    RET
+
+
 INIT_YMM avx2
 cglobal intra_pred_ang16_4, 3, 6, 12
     mova              m11, [pw_1024]

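The c_ang16_mode_3 table added above packs, for each output column, the
byte pair (32 - fract, fract) with fract = ((col + 1) * 26) & 31; each
32-byte row holds column k in the low lane and column k + 8 in the high
lane, so eight rows cover all sixteen columns. A hypothetical generator
that reproduces the db lines above:

#include <cstdio>

int main()
{
    const int angle = 26;  // intraPredAngle for mode 3
    for (int k = 0; k < 8; k++)
    {
        for (int lane = 0; lane < 2; lane++)
        {
            int col  = k + 8 * lane;                // low lane: col k, high lane: col k+8
            int fact = ((col + 1) * angle) & 31;    // fractional weight for this column
            for (int i = 0; i < 8; i++)
                printf("%d, %d, ", 32 - fact, fact);
        }
        printf("\n");
    }
    return 0;
}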

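The INTRA_PRED_ANG16_CAL_ROW macro body is not part of this hunk, but
from the constants it consumes, the per-row arithmetic is presumably the
usual pmaddubsw/pmulhrsw pairing: pshufb (via intra_pred_shuff_0_8)
interleaves adjacent reference bytes, pmaddubsw weights each pair by
(32 - fract, fract), and pmulhrsw against pw_1024 realizes the spec
rounding (x + 16) >> 5, since (x * 1024 * 2 + 32768) >> 16 ==
(x + 16) >> 5. A sketch of that step with AVX2 intrinsics, under those
assumptions:

#include <immintrin.h>

static inline __m256i predict_two_columns(__m256i interleavedRefs, __m256i coeffs)
{
    // interleavedRefs: a0,a1, a1,a2, ... (unsigned); coeffs: 32-f, f pairs (signed)
    __m256i sum = _mm256_maddubs_epi16(interleavedRefs, coeffs); // (32-f)*a + f*b
    return _mm256_mulhrs_epi16(sum, _mm256_set1_epi16(1024));    // (sum + 16) >> 5
}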