[x265] [PATCH 2 of 3] asm: intra_pred_ang32_21 improved by ~27% over SSE4, 3439.25c -> 2504.30c

praveen at multicorewareinc.com
Wed Apr 1 07:39:31 CEST 2015


# HG changeset patch
# User Praveen Tiwari <praveen at multicorewareinc.com>
# Date 1427804388 -19800
#      Tue Mar 31 17:49:48 2015 +0530
# Node ID 51f4a0edf3c9f62fb5fe5167da0553143f28e0ba
# Parent  6b9308ad7368a9f18e13d29e177dd2e720996ccb
asm: intra_pred_ang32_21 improved by ~27% over SSE4, 3439.25c -> 2504.30c
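
Mode 21 is one of the negative-angle HEVC intra modes (intraPredAngle = -17, invAngle = -482): the above-neighbour row serves as the main reference array and is extended leftward with projected left-column samples. The scalar computation the AVX2 kernel vectorizes looks roughly like the minimal sketch below, assuming x265's neighbour layout for 32x32 blocks (srcPix[0] = top-left, srcPix[1..64] = above row, srcPix[65..128] = left column); the function name is only illustrative:

#include <stdint.h>
#include <stddef.h>

typedef uint8_t pixel;

/* Illustrative scalar reference for dirMode 21 (intraPredAngle = -17,
 * invAngle = -482) on a 32x32, 8bpp block.  Assumes x265's neighbour
 * layout: srcPix[0] = top-left, srcPix[1..64] = above row,
 * srcPix[65..128] = left column.  Arithmetic right shift of negative
 * values is assumed, as in x265's own scalar code. */
static void ref_intra_pred_ang32_21(pixel* dst, intptr_t dstStride,
                                    const pixel* srcPix)
{
    enum { size = 32, angle = -17, invAngle = -482 };

    pixel refBuf[3 * size + 1];
    pixel* refMain = refBuf + size;   /* leave room for negative indices */

    /* Main reference: top-left sample plus the above-neighbour row. */
    for (int x = 0; x <= size; x++)
        refMain[x] = srcPix[x];

    /* Negative angle: extend refMain leftward with left-column samples
     * projected through invAngle; this yields the srcPix[66],
     * srcPix[68], ... offsets seen in the pinsrb loads below. */
    for (int x = -1; x >= (size * angle) >> 5; x--)
        refMain[x] = srcPix[64 + ((x * invAngle + 128) >> 8)];

    /* Each output row is a two-tap filter between two neighbouring
     * reference samples, weighted by the fractional projected offset. */
    for (int y = 0; y < size; y++)
    {
        int deltaPos   = (y + 1) * angle;
        int deltaInt   = deltaPos >> 5;
        int deltaFract = deltaPos & 31;

        for (int x = 0; x < size; x++)
            dst[y * dstStride + x] =
                (pixel)((refMain[x + deltaInt + 1] * (32 - deltaFract) +
                         refMain[x + deltaInt + 2] * deltaFract + 16) >> 5);
    }
}

In the assembly, the inner two-tap filter becomes a pmaddubsw against the interleaved (32 - fract, fract) byte weights in c_ang32_mode_21, and the (+16) >> 5 rounding becomes a pmulhrsw against pw_1024.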

diff -r 6b9308ad7368 -r 51f4a0edf3c9 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Tue Mar 31 14:17:39 2015 +0530
+++ b/source/common/x86/asm-primitives.cpp	Tue Mar 31 17:49:48 2015 +0530
@@ -1649,6 +1649,7 @@
         p.cu[BLOCK_32x32].intra_pred[24] = x265_intra_pred_ang32_24_avx2;
         p.cu[BLOCK_32x32].intra_pred[23] = x265_intra_pred_ang32_23_avx2;
         p.cu[BLOCK_32x32].intra_pred[22] = x265_intra_pred_ang32_22_avx2;
+        p.cu[BLOCK_32x32].intra_pred[21] = x265_intra_pred_ang32_21_avx2;
 
         // copy_sp primitives
         p.cu[BLOCK_16x16].copy_sp = x265_blockcopy_sp_16x16_avx2;
diff -r 6b9308ad7368 -r 51f4a0edf3c9 source/common/x86/intrapred.h
--- a/source/common/x86/intrapred.h	Tue Mar 31 14:17:39 2015 +0530
+++ b/source/common/x86/intrapred.h	Tue Mar 31 17:49:48 2015 +0530
@@ -217,6 +217,7 @@
 void x265_intra_pred_ang32_24_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
 void x265_intra_pred_ang32_23_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
 void x265_intra_pred_ang32_22_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
+void x265_intra_pred_ang32_21_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
 void x265_all_angs_pred_4x4_sse4(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma);
 void x265_all_angs_pred_8x8_sse4(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma);
 void x265_all_angs_pred_16x16_sse4(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma);
diff -r 6b9308ad7368 -r 51f4a0edf3c9 source/common/x86/intrapred8.asm
--- a/source/common/x86/intrapred8.asm	Tue Mar 31 14:17:39 2015 +0530
+++ b/source/common/x86/intrapred8.asm	Tue Mar 31 17:49:48 2015 +0530
@@ -509,6 +509,26 @@
 c_mode32_23_shuff3:      times 2 db 14, 10, 7, 3, 0, 0, 0
 c_mode32_23_shuff4:      times 2 db 10, 7, 3, 0, 0, 0, 0, 0
 
+
+ALIGN 32
+c_ang32_mode_21: db 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15
+                 db 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13
+                 db 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11
+                 db 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9
+                 db 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7
+                 db 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5
+                 db 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3
+                 db 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1
+                 db 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16
+                 db 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14
+                 db 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12
+                 db 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10
+                 db 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8
+                 db 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6
+                 db 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4
+                 db 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2
+                 db 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0, 32, 0
+
 ALIGN 32
 ;; (blkSize - 1 - x)
 pw_planar4_0:         dw 3,  2,  1,  0,  3,  2,  1,  0
@@ -15181,4 +15201,349 @@
     vpermq            m6, m6, 11011000b
     movu              [r0 + r3], m6
     RET
+
+INIT_YMM avx2
+cglobal intra_pred_ang32_21, 3, 5, 13
+    mova              m0, [pw_1024]
+    mova              m1, [intra_pred_shuff_0_8]
+    lea               r3, [3 * r1]
+    lea               r4, [c_ang32_mode_21]
+
+    ;row[0]
+    vbroadcasti128    m11, [r2 + 0]
+    pshufb            m2, m11, m1
+    vbroadcasti128    m3, [r2 + 8]
+    pshufb            m3, m1
+    vbroadcasti128    m4, [r2 + 16]
+    pshufb            m4, m1
+    vbroadcasti128    m5, [r2 + 24]
+    pshufb            m5, m1
+
+    vperm2i128        m6, m2, m3, 00100000b
+    pmaddubsw         m6, [r4 + 0 * mmsize]
+    pmulhrsw          m6, m0
+    vperm2i128        m7, m4, m5, 00100000b
+    pmaddubsw         m7, [r4 + 0 * mmsize]
+    pmulhrsw          m7, m0
+    packuswb          m6, m7
+    vpermq            m6, m6, 11011000b
+    movu              [r0], m6
+
+    ;row[1, 2]
+    pslldq            xm11, 1
+    pinsrb            xm11, [r2 + 66], 0
+    vinserti128       m2, m11, xm11, 1
+    pshufb            m2, m1
+    vbroadcasti128    m3, [r2 + 7]
+    pshufb            m3, m1
+    vbroadcasti128    m4, [r2 + 15]
+    pshufb            m4, m1
+    vbroadcasti128    m5, [r2 + 23]
+    pshufb            m5, m1
+
+    mova              m10, [r4 + 1 * mmsize]
+
+    INTRA_PRED_ANG32_CAL_ROW
+    movu              [r0 + r1], m7
+    movu              [r0 + 2 * r1], m6
+
+    ;row[3, 4]
+    pslldq            xm11, 1
+    pinsrb            xm11, [r2 + 68], 0
+    vinserti128       m2, m11, xm11, 1
+    pshufb            m2, m1
+    vbroadcasti128    m3, [r2 + 6]
+    pshufb            m3, m1
+    vbroadcasti128    m4, [r2 + 14]
+    pshufb            m4, m1
+    vbroadcasti128    m5, [r2 + 22]
+    pshufb            m5, m1
+
+    mova              m10, [r4 + 2 * mmsize]
+
+    INTRA_PRED_ANG32_CAL_ROW
+    movu              [r0 + r3], m7
+    lea               r0, [r0 + 4 * r1]
+    movu              [r0], m6
+
+    ;row[5, 6]
+    pslldq            xm11, 1
+    pinsrb            xm11, [r2 + 70], 0
+    vinserti128       m2, m11, xm11, 1
+    pshufb            m2, m1
+    vbroadcasti128    m3, [r2 + 5]
+    pshufb            m3, m1
+    vbroadcasti128    m4, [r2 + 13]
+    pshufb            m4, m1
+    vbroadcasti128    m5, [r2 + 21]
+    pshufb            m5, m1
+
+    mova              m10, [r4 + 3 * mmsize]
+
+    INTRA_PRED_ANG32_CAL_ROW
+    movu              [r0 + r1], m7
+    movu              [r0 + 2 * r1], m6
+
+    ;row[7, 8]
+    pslldq            xm11, 1
+    pinsrb            xm11, [r2 + 72], 0
+    vinserti128       m2, m11, xm11, 1
+    pshufb            m2, m1
+    vbroadcasti128    m3, [r2 + 4]
+    pshufb            m3, m1
+    vbroadcasti128    m4, [r2 + 12]
+    pshufb            m4, m1
+    vbroadcasti128    m5, [r2 + 20]
+    pshufb            m5, m1
+
+    add               r4, 4 * mmsize
+    mova              m10, [r4 + 0 * mmsize]
+
+    INTRA_PRED_ANG32_CAL_ROW
+    movu              [r0 + r3], m7
+    lea               r0, [r0 + 4 * r1]
+    movu              [r0], m6
+
+    ;row[9, 10]
+    pslldq            xm11, 1
+    pinsrb            xm11, [r2 + 73], 0
+    vinserti128       m2, m11, xm11, 1
+    pshufb            m2, m1
+    vbroadcasti128    m3, [r2 + 3]
+    pshufb            m3, m1
+    vbroadcasti128    m4, [r2 + 11]
+    pshufb            m4, m1
+    vbroadcasti128    m5, [r2 + 19]
+    pshufb            m5, m1
+
+    mova              m10, [r4 + 1 * mmsize]
+
+    INTRA_PRED_ANG32_CAL_ROW
+    movu              [r0 + r1], m7
+    movu              [r0 + 2 * r1], m6
+
+    ;row[11, 12]
+    pslldq            xm11, 1
+    pinsrb            xm11, [r2 + 75], 0
+    vinserti128       m2, m11, xm11, 1
+    pshufb            m2, m1
+    vbroadcasti128    m3, [r2 + 2]
+    pshufb            m3, m1
+    vbroadcasti128    m4, [r2 + 10]
+    pshufb            m4, m1
+    vbroadcasti128    m5, [r2 + 18]
+    pshufb            m5, m1
+
+    mova              m10, [r4 + 2 * mmsize]
+
+    INTRA_PRED_ANG32_CAL_ROW
+    movu              [r0 + r3], m7
+    lea               r0, [r0 + 4 * r1]
+    movu              [r0], m6
+
+    ;row[13, 14]
+    pslldq            xm11, 1
+    pinsrb            xm11, [r2 + 77], 0
+    vinserti128       m2, m11, xm11, 1
+    pshufb            m2, m1
+    vbroadcasti128    m3, [r2 + 1]
+    pshufb            m3, m1
+    vbroadcasti128    m4, [r2 + 9]
+    pshufb            m4, m1
+    vbroadcasti128    m5, [r2 + 17]
+    pshufb            m5, m1
+
+    mova              m10, [r4 + 3 * mmsize]
+
+    INTRA_PRED_ANG32_CAL_ROW
+    movu              [r0 + r1], m7
+    movu              [r0 + 2 * r1], m6
+
+    ;row[15]
+    pslldq            xm11, 1
+    pinsrb            xm11, [r2 + 79], 0
+    vinserti128       m2, m11, xm11, 1
+    pshufb            m2, m1
+    vbroadcasti128    m12, [r2 + 0]
+    pshufb            m3, m12, m1
+    vbroadcasti128    m4, [r2 + 8]
+    pshufb            m4, m1
+    vbroadcasti128    m5, [r2 + 16]
+    pshufb            m5, m1
+    vperm2i128        m6, m2, m3, 00100000b
+    add               r4, 4 * mmsize
+    pmaddubsw         m6, [r4 + 0 * mmsize]
+    pmulhrsw          m6, m0
+    vperm2i128        m7, m4, m5, 00100000b
+    pmaddubsw         m7, [r4 + 0 * mmsize]
+    pmulhrsw          m7, m0
+    packuswb          m6, m7
+    vpermq            m6, m6, 11011000b
+    movu              [r0 + r3], m6
+
+    ;row[16, 17]
+    pslldq            xm11, 1
+    pinsrb            xm11, [r2 + 81], 0
+    vinserti128       m2, m11, xm11, 1
+    pshufb            m2, m1
+    pslldq            xm12, 1
+    pinsrb            xm12, [r2 + 66], 0
+    vinserti128       m3, m12, xm12, 1
+    pshufb            m3, m1
+    vbroadcasti128    m4, [r2 + 7]
+    pshufb            m4, m1
+    vbroadcasti128    m5, [r2 + 15]
+    pshufb            m5, m1
+
+    mova              m10, [r4 + 1 * mmsize]
+
+    INTRA_PRED_ANG32_CAL_ROW
+    lea               r0, [r0 + 4 * r1]
+    movu              [r0], m7
+    movu              [r0 + r1], m6
+
+    ;row[18, 19]
+    pslldq            xm11, 1
+    pinsrb            xm11, [r2 + 83], 0
+    vinserti128       m2, m11, xm11, 1
+    pshufb            m2, m1
+    pslldq            xm12, 1
+    pinsrb            xm12, [r2 + 68], 0
+    vinserti128       m3, m12, xm12, 1
+    pshufb            m3, m1
+    vbroadcasti128    m4, [r2 + 6]
+    pshufb            m4, m1
+    vbroadcasti128    m5, [r2 + 14]
+    pshufb            m5, m1
+
+    mova              m10, [r4 + 2 * mmsize]
+
+    INTRA_PRED_ANG32_CAL_ROW
+    movu              [r0 + 2 * r1], m7
+    movu              [r0 + r3], m6
+
+    ;row[20, 21]
+    pslldq            xm11, 1
+    pinsrb            xm11, [r2 + 85], 0
+    vinserti128       m2, m11, xm11, 1
+    pshufb            m2, m1
+    pslldq            xm12, 1
+    pinsrb            xm12, [r2 + 70], 0
+    vinserti128       m3, m12, xm12, 1
+    pshufb            m3, m1
+    vbroadcasti128    m4, [r2 + 5]
+    pshufb            m4, m1
+    vbroadcasti128    m5, [r2 + 13]
+    pshufb            m5, m1
+
+    mova              m10, [r4 + 3 * mmsize]
+
+    INTRA_PRED_ANG32_CAL_ROW
+    lea               r0, [r0 + 4 * r1]
+    movu              [r0], m7
+    movu              [r0 + r1], m6
+
+    ;row[22, 23]
+    pslldq            xm11, 1
+    pinsrb            xm11, [r2 + 87], 0
+    vinserti128       m2, m11, xm11, 1
+    pshufb            m2, m1
+    pslldq            xm12, 1
+    pinsrb            xm12, [r2 + 72], 0
+    vinserti128       m3, m12, xm12, 1
+    pshufb            m3, m1
+    vbroadcasti128    m4, [r2 + 4]
+    pshufb            m4, m1
+    vbroadcasti128    m5, [r2 + 12]
+    pshufb            m5, m1
+
+    add               r4, 4 * mmsize
+    mova              m10, [r4 + 0 * mmsize]
+
+    INTRA_PRED_ANG32_CAL_ROW
+    movu              [r0 + 2 * r1], m7
+    movu              [r0 + r3], m6
+
+    ;row[24, 25]
+    pslldq            xm11, 1
+    pinsrb            xm11, [r2 + 88], 0
+    vinserti128       m2, m11, xm11, 1
+    pshufb            m2, m1
+    pslldq            xm12, 1
+    pinsrb            xm12, [r2 + 73], 0
+    vinserti128       m3, m12, xm12, 1
+    pshufb            m3, m1
+    vbroadcasti128    m4, [r2 + 3]
+    pshufb            m4, m1
+    vbroadcasti128    m5, [r2 + 11]
+    pshufb            m5, m1
+
+    mova              m10, [r4 + 1 * mmsize]
+
+    INTRA_PRED_ANG32_CAL_ROW
+    lea               r0, [r0 + 4 * r1]
+    movu              [r0], m7
+    movu              [r0 + r1], m6
+
+    ;row[26, 27]
+    pslldq            xm11, 1
+    pinsrb            xm11, [r2 + 90], 0
+    vinserti128       m2, m11, xm11, 1
+    pshufb            m2, m1
+    pslldq            xm12, 1
+    pinsrb            xm12, [r2 + 75], 0
+    vinserti128       m3, m12, xm12, 1
+    pshufb            m3, m1
+    vbroadcasti128    m4, [r2 + 2]
+    pshufb            m4, m1
+    vbroadcasti128    m5, [r2 + 10]
+    pshufb            m5, m1
+
+    mova              m10, [r4 + 2 * mmsize]
+
+    INTRA_PRED_ANG32_CAL_ROW
+    movu              [r0 + 2 * r1], m7
+    movu              [r0 + r3], m6
+
+    ;row[28, 29]
+    pslldq            xm11, 1
+    pinsrb            xm11, [r2 + 92], 0
+    vinserti128       m2, m11, xm11, 1
+    pshufb            m2, m1
+    pslldq            xm12, 1
+    pinsrb            xm12, [r2 + 77], 0
+    vinserti128       m3, m12, xm12, 1
+    pshufb            m3, m1
+    vbroadcasti128    m4, [r2 + 1]
+    pshufb            m4, m1
+    vbroadcasti128    m5, [r2 + 9]
+    pshufb            m5, m1
+
+    mova              m10, [r4 + 3 * mmsize]
+
+    INTRA_PRED_ANG32_CAL_ROW
+    lea               r0, [r0 + 4 * r1]
+    movu              [r0], m7
+    movu              [r0 + r1], m6
+
+    ;row[30, 31]
+    pslldq            xm11, 1
+    pinsrb            xm11, [r2 + 94], 0
+    vinserti128       m2, m11, xm11, 1
+    pshufb            m2, m1
+    pslldq            xm12, 1
+    pinsrb            xm12, [r2 + 79], 0
+    vinserti128       m3, m12, xm12, 1
+    pshufb            m3, m1
+    vbroadcasti128    m4, [r2 + 0]
+    pshufb            m4, m1
+    vbroadcasti128    m5, [r2 + 8]
+    pshufb            m5, m1
+
+    mova              m10, [r4 + 4 * mmsize]
+
+    INTRA_PRED_ANG32_CAL_ROW
+    movu              [r0 + 2 * r1], m7
+    movu              [r0 + r3], m6
+    RET
 %endif
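
For reference, each 32-byte entry of the c_ang32_mode_21 table added above packs the (32 - fract, fract) weight pairs for two output rows, one per 128-bit lane, which is why INTRA_PRED_ANG32_CAL_ROW stores two rows per entry (the entries for rows 0 and 15 repeat a single row across both lanes). A small stand-alone generator along the following lines (illustrative, not part of the patch) reproduces those bytes:

#include <stdio.h>

int main(void)
{
    /* fract is the fractional projection step for output row y;
     * relies on two's-complement AND for the negative product,
     * as x265's own scalar code does. */
    for (int y = 0; y < 32; y++)
    {
        int fract = ((y + 1) * -17) & 31;
        printf("row %2d: db %2d, %2d  (pair repeated 8x per lane)\n",
               y, 32 - fract, fract);
    }
    return 0;
}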

