[x265] [PATCH 1 of 3] asm: avx2 intra_pred_ang32_23, improved by ~45% over SSE4

praveen at multicorewareinc.com praveen at multicorewareinc.com
Tue Mar 31 09:24:46 CEST 2015


# HG changeset patch
# User Praveen Tiwari <praveen at multicorewareinc.com>
# Date 1427698128 -19800
#      Mon Mar 30 12:18:48 2015 +0530
# Node ID c65ddafcd4c34e9fc21390c6508e3de633eb2cb2
# Parent  8f546de630c913b69a00ae154bf32e7a9b1a5db4
asm: avx2 intra_pred_ang32_23, improved by ~45% over SSE4

AVX2:
intra_ang_32x32[23]     15.76x   1925.55         30339.30

SSE4:
intra_ang_32x32[23]     8.63x    3516.52         30340.28

(columns: speedup over C, primitive cycles, C reference cycles; the ~45%
in the subject is the cycle reduction over SSE4: 1 - 1925.55/3516.52 ~= 45%)
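
For reference, the scalar computation being vectorized is the standard
HEVC angular interpolation; a rough sketch for a negative-angle vertical
mode such as mode 23 (refMain/dst and the loop shape are illustrative,
not the exact x265 source):

    /* angle = -9 for mode 23; refMain is the above-row reference,
     * extended to the left with left-column samples projected through
     * invAngle = 910 */
    for (int y = 0; y < 32; y++)
    {
        int deltaPos = (y + 1) * angle;
        int deltaInt = deltaPos >> 5;  /* integer step into refMain */
        int fract    = deltaPos & 31;  /* 5-bit interpolation weight */
        for (int x = 0; x < 32; x++)
            dst[y * dstStride + x] =
                (pixel)((refMain[x + deltaInt + 1] * (32 - fract) +
                         refMain[x + deltaInt + 2] * fract + 16) >> 5);
    }

The AVX2 version bakes the per-row (32 - fract, fract) pairs into the
c_ang32_mode_23 table below, so one pmaddubsw/pmulhrsw pair produces 16
interpolated pixels at a time.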

diff -r 8f546de630c9 -r c65ddafcd4c3 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Mon Mar 30 21:11:20 2015 -0500
+++ b/source/common/x86/asm-primitives.cpp	Mon Mar 30 12:18:48 2015 +0530
@@ -1647,6 +1647,7 @@
         p.cu[BLOCK_32x32].intra_pred[33] = x265_intra_pred_ang32_33_avx2;
         p.cu[BLOCK_32x32].intra_pred[25] = x265_intra_pred_ang32_25_avx2;
         p.cu[BLOCK_32x32].intra_pred[24] = x265_intra_pred_ang32_24_avx2;
+        p.cu[BLOCK_32x32].intra_pred[23] = x265_intra_pred_ang32_23_avx2;
 
         // copy_sp primitives
         p.cu[BLOCK_16x16].copy_sp = x265_blockcopy_sp_16x16_avx2;
diff -r 8f546de630c9 -r c65ddafcd4c3 source/common/x86/intrapred.h
--- a/source/common/x86/intrapred.h	Mon Mar 30 21:11:20 2015 -0500
+++ b/source/common/x86/intrapred.h	Mon Mar 30 12:18:48 2015 +0530
@@ -215,6 +215,7 @@
 void x265_intra_pred_ang32_33_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
 void x265_intra_pred_ang32_25_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
 void x265_intra_pred_ang32_24_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
+void x265_intra_pred_ang32_23_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
 void x265_all_angs_pred_4x4_sse4(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma);
 void x265_all_angs_pred_8x8_sse4(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma);
 void x265_all_angs_pred_16x16_sse4(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma);
diff -r 8f546de630c9 -r c65ddafcd4c3 source/common/x86/intrapred8.asm
--- a/source/common/x86/intrapred8.asm	Mon Mar 30 21:11:20 2015 -0500
+++ b/source/common/x86/intrapred8.asm	Mon Mar 30 12:18:48 2015 +0530
@@ -447,6 +447,27 @@
                    db 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5
                    db 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0
 
+
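+; Interleaved 8-bit weight pairs (32 - fract, fract) for pmaddubsw, with
+; fract = ((y + 1) * -9) & 31 for output row y (mode 23 angle is -9).
+; A line holding two patterns serves rows y and y+1, one per 128-bit lane;
+; a uniform line serves a single full row.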
+ALIGN 32
+c_ang32_mode_23:  db 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14
+                  db 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5
+                  db 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19
+                  db 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1
+                  db 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15
+                  db 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6
+                  db 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20
+                  db 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2
+                  db 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16
+                  db 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7
+                  db 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21
+                  db 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3
+                  db 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17
+                  db 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8
+                  db 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22
+                  db 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4
+                  db 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18
+                  db 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0
+
 ALIGN 32
 ;; (blkSize - 1 - x)
 pw_planar4_0:         dw 3,  2,  1,  0,  3,  2,  1,  0
@@ -14504,5 +14525,290 @@
     vpermq            m6, m6, 11011000b
     movu              [r0 + r3], m6
     RET
+
+INIT_YMM avx2
+cglobal intra_pred_ang32_23, 3, 5, 11
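+    ; r0 = dst, r1 = dstStride, r2 = srcPix (cglobal loads the 3 args);
+    ; pmulhrsw by pw_1024 rounds the pmaddubsw sums as (sum + 16) >> 5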
+    mova              m0, [pw_1024]
+    mova              m1, [intra_pred_shuff_0_8]
+    lea               r3, [3 * r1]
+    lea               r4, [c_ang32_mode_23]
+
+    ;row[0, 1]
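+    ; fetch 16 reference bytes at r2 + 0/8/16/24 into each lane and expand
+    ; them to overlapping (p[x], p[x+1]) pairs with intra_pred_shuff_0_8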
+    vbroadcasti128    m2, [r2 + 0]
+    pshufb            m2, m1
+    vbroadcasti128    m3, [r2 + 8]
+    pshufb            m3, m1
+    vbroadcasti128    m4, [r2 + 16]
+    pshufb            m4, m1
+    vbroadcasti128    m5, [r2 + 24]
+    pshufb            m5, m1
+
+    mova              m10, [r4 + 0 * mmsize]
+
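+    ; INTRA_PRED_ANG32_CAL_ROW filters m2..m5 against the weights in m10
+    ; and packs: row y lands in m7, row y+1 in m6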
+    INTRA_PRED_ANG32_CAL_ROW
+    movu              [r0], m7
+    movu              [r0 + r1], m6
+
+    ;row[2]
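+    ; uniform weight line covers a single row: combine only the low lanes
+    ; of m2..m5 and filter once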
+    vperm2i128        m6, m2, m3, 00100000b
+    pmaddubsw         m6, [r4 + 1 * mmsize]
+    pmulhrsw          m6, m0
+    vperm2i128        m7, m4, m5, 00100000b
+    pmaddubsw         m7, [r4 + 1 * mmsize]
+    pmulhrsw          m7, m0
+    packuswb          m6, m7
+    vpermq            m6, m6, 11011000b
+    movu              [r0 + 2 * r1], m6
+
+    ;row[3, 4]
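+    ; deltaInt has stepped left of the above row: reload the window at
+    ; r2 - 1 and patch byte 0 with the projected left-column sample
+    ; (srcPix[68] = left[3], via invAngle = 910 for angle -9)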
+    movu              xm2, [r2 - 1]
+    pinsrb            xm2, [r2 + 68], 0
+    vinserti128       m2, m2, xm2, 1
+    pshufb            m2, m1
+    vbroadcasti128    m3, [r2 + 7]
+    pshufb            m3, m1
+    vbroadcasti128    m4, [r2 + 15]
+    pshufb            m4, m1
+    vbroadcasti128    m5, [r2 + 23]
+    pshufb            m5, m1
+
+    mova              m10, [r4 + 2 * mmsize]
+
+    INTRA_PRED_ANG32_CAL_ROW
+    movu              [r0 + r3], m7
+    lea               r0, [r0 + 4 * r1]
+    movu              [r0], m6
+
+    ;row[5, 6]
+    mova              m10, [r4 + 3 * mmsize]
+
+    INTRA_PRED_ANG32_CAL_ROW
+    movu              [r0 + r1], m7
+    movu              [r0 + 2 * r1], m6
+
+    ;row[7, 8]
+    movu              xm2, [r2 - 2]
+    pinsrb            xm2, [r2 + 71], 0
+    pinsrb            xm2, [r2 + 68], 1
+    vinserti128       m2, m2, xm2, 1
+    pshufb            m2, m1
+    vbroadcasti128    m3, [r2 + 6]
+    pshufb            m3, m1
+    vbroadcasti128    m4, [r2 + 14]
+    pshufb            m4, m1
+    vbroadcasti128    m5, [r2 + 22]
+    pshufb            m5, m1
+
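+    ; advance the weight-table pointer past the four lines just consumed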
+    add               r4, 4 * mmsize
+    mova              m10, [r4 + 0 * mmsize]
+
+    INTRA_PRED_ANG32_CAL_ROW
+    movu              [r0 + r3], m7
+    lea               r0, [r0 + 4 * r1]
+    movu              [r0], m6
+
+    ;row[9]
+    vperm2i128        m6, m2, m3, 00100000b
+    pmaddubsw         m6, [r4 + 1 * mmsize]
+    pmulhrsw          m6, m0
+    vperm2i128        m7, m4, m5, 00100000b
+    pmaddubsw         m7, [r4 + 1 * mmsize]
+    pmulhrsw          m7, m0
+    packuswb          m6, m7
+    vpermq            m6, m6, 11011000b
+    movu              [r0 + r1], m6
+
+    ;row[10, 11]
+    movu              xm2, [r2 - 3]
+    pinsrb            xm2, [r2 + 75], 0
+    pinsrb            xm2, [r2 + 71], 1
+    pinsrb            xm2, [r2 + 68], 2
+    vinserti128       m2, m2, xm2, 1
+    pshufb            m2, m1
+    vbroadcasti128    m3, [r2 + 5]
+    pshufb            m3, m1
+    vbroadcasti128    m4, [r2 + 13]
+    pshufb            m4, m1
+    vbroadcasti128    m5, [r2 + 21]
+    pshufb            m5, m1
+
+    mova              m10, [r4 + 2 * mmsize]
+
+    INTRA_PRED_ANG32_CAL_ROW
+    movu              [r0 + 2 * r1], m7
+    movu              [r0 + r3], m6
+
+    ;row[12, 13]
+    lea               r0, [r0 + 4 * r1]
+    mova              m10, [r4 + 3 * mmsize]
+
+    INTRA_PRED_ANG32_CAL_ROW
+    movu              [r0], m7
+    movu              [r0 + r1], m6
+
+    ;row[14, 15]
+    movu              xm2, [r2 - 4]
+    pinsrb            xm2, [r2 + 78], 0
+    pinsrb            xm2, [r2 + 75], 1
+    pinsrb            xm2, [r2 + 71], 2
+    pinsrb            xm2, [r2 + 68], 3
+    vinserti128       m2, m2, xm2, 1
+    pshufb            m2, m1
+    vbroadcasti128    m3, [r2 + 4]
+    pshufb            m3, m1
+    vbroadcasti128    m4, [r2 + 12]
+    pshufb            m4, m1
+    vbroadcasti128    m5, [r2 + 20]
+    pshufb            m5, m1
+
+    add               r4, 4 * mmsize
+    mova              m10, [r4 + 0 * mmsize]
+
+    INTRA_PRED_ANG32_CAL_ROW
+    movu              [r0 + 2 * r1], m7
+    movu              [r0 + r3], m6
+
+    ;row[16]
+    lea               r0, [r0 + 4 * r1]
+    vperm2i128        m6, m2, m3, 00100000b
+    pmaddubsw         m6, [r4 + 1 * mmsize]
+    pmulhrsw          m6, m0
+    vperm2i128        m7, m4, m5, 00100000b
+    pmaddubsw         m7, [r4 + 1 * mmsize]
+    pmulhrsw          m7, m0
+    packuswb          m6, m7
+    vpermq            m6, m6, 11011000b
+    movu              [r0], m6
+
+    ;row[17, 18]
+    movu              xm2, [r2 - 5]
+    pinsrb            xm2, [r2 + 82], 0
+    pinsrb            xm2, [r2 + 78], 1
+    pinsrb            xm2, [r2 + 75], 2
+    pinsrb            xm2, [r2 + 71], 3
+    pinsrb            xm2, [r2 + 68], 4
+    vinserti128       m2, m2, xm2, 1
+    pshufb            m2, m1
+    vbroadcasti128    m3, [r2 + 3]
+    pshufb            m3, m1
+    vbroadcasti128    m4, [r2 + 11]
+    pshufb            m4, m1
+    vbroadcasti128    m5, [r2 + 19]
+    pshufb            m5, m1
+
+    mova              m10, [r4 + 2 * mmsize]
+
+    INTRA_PRED_ANG32_CAL_ROW
+    movu              [r0 + r1], m7
+    movu              [r0 + 2 * r1], m6
+
+    ;row[19, 20]
+    mova              m10, [r4 + 3 * mmsize]
+
+    INTRA_PRED_ANG32_CAL_ROW
+    movu              [r0 + r3], m7
+    lea               r0, [r0 + 4 * r1]
+    movu              [r0], m6
+
+    ;row[21, 22]
+    movu              xm2, [r2 - 6]
+    pinsrb            xm2, [r2 + 85], 0
+    pinsrb            xm2, [r2 + 82], 1
+    pinsrb            xm2, [r2 + 78], 2
+    pinsrb            xm2, [r2 + 75], 3
+    pinsrb            xm2, [r2 + 71], 4
+    pinsrb            xm2, [r2 + 68], 5
+    vinserti128       m2, m2, xm2, 1
+    pshufb            m2, m1
+    vbroadcasti128    m3, [r2 + 2]
+    pshufb            m3, m1
+    vbroadcasti128    m4, [r2 + 10]
+    pshufb            m4, m1
+    vbroadcasti128    m5, [r2 + 18]
+    pshufb            m5, m1
+
+    add               r4, 4 * mmsize
+    mova              m10, [r4 + 0 * mmsize]
+
+    INTRA_PRED_ANG32_CAL_ROW
+    movu              [r0 + r1], m7
+    movu              [r0 + 2 * r1], m6
+
+    ;row[23]
+    vperm2i128        m6, m2, m3, 00100000b
+    pmaddubsw         m6, [r4 + 1 * mmsize]
+    pmulhrsw          m6, m0
+    vperm2i128        m7, m4, m5, 00100000b
+    pmaddubsw         m7, [r4 + 1 * mmsize]
+    pmulhrsw          m7, m0
+    packuswb          m6, m7
+    vpermq            m6, m6, 11011000b
+    movu              [r0 + r3], m6
+
+    ;row[24, 25]
+    movu              xm2, [r2 - 7]
+    pinsrb            xm2, [r2 + 89], 0
+    pinsrb            xm2, [r2 + 85], 1
+    pinsrb            xm2, [r2 + 82], 2
+    pinsrb            xm2, [r2 + 78], 3
+    pinsrb            xm2, [r2 + 75], 4
+    pinsrb            xm2, [r2 + 71], 5
+    pinsrb            xm2, [r2 + 68], 6
+    vinserti128       m2, m2, xm2, 1
+    pshufb            m2, m1
+    vbroadcasti128    m3, [r2 + 1]
+    pshufb            m3, m1
+    vbroadcasti128    m4, [r2 + 9]
+    pshufb            m4, m1
+    vbroadcasti128    m5, [r2 + 17]
+    pshufb            m5, m1
+
+    mova              m10, [r4 + 2 * mmsize]
+    lea               r0, [r0 + 4 * r1]
+
+    INTRA_PRED_ANG32_CAL_ROW
+    movu              [r0], m7
+    movu              [r0 + r1], m6
+
+    ;row[26, 27]
+    mova              m10, [r4 + 3 * mmsize]
+
+    INTRA_PRED_ANG32_CAL_ROW
+    movu              [r0 + 2 * r1], m7
+    movu              [r0 + r3], m6
+
+    ;row[28, 29]
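+    ; the window now starts 8 bytes left of r2; all 8 patched bytes are
+    ; projected left samples (srcPix[92/89/85/82/78/75/71/68])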
+    movu              xm2, [r2 - 8]
+    pinsrb            xm2, [r2 + 92], 0
+    pinsrb            xm2, [r2 + 89], 1
+    pinsrb            xm2, [r2 + 85], 2
+    pinsrb            xm2, [r2 + 82], 3
+    pinsrb            xm2, [r2 + 78], 4
+    pinsrb            xm2, [r2 + 75], 5
+    pinsrb            xm2, [r2 + 71], 6
+    pinsrb            xm2, [r2 + 68], 7
+    vinserti128       m2, m2, xm2, 1
+    pshufb            m2, m1
+    vbroadcasti128    m3, [r2 + 0]
+    pshufb            m3, m1
+    vbroadcasti128    m4, [r2 + 8]
+    pshufb            m4, m1
+    vbroadcasti128    m5, [r2 + 16]
+    pshufb            m5, m1
+
+    add               r4, 4 * mmsize
+    mova              m10, [r4 + 0 * mmsize]
+    lea               r0, [r0 + 4 * r1]
+
+    INTRA_PRED_ANG32_CAL_ROW
+    movu              [r0], m7
+    movu              [r0 + r1], m6
+
+    ;row[30, 31]
+    mova              m10, [r4 + 1 * mmsize]
+
+    INTRA_PRED_ANG32_CAL_ROW
+    movu              [r0 + 2 * r1], m7
+    movu              [r0 + r3], m6
+    RET
 %endif
-

