[x265] [PATCH 3 of 9] asm-intra_pred_ang16_32: improve ~45% over SSE4

praveen at multicorewareinc.com praveen at multicorewareinc.com
Tue Mar 17 06:11:04 CET 2015


# HG changeset patch
# User Praveen Tiwari <praveen at multicorewareinc.com>
# Date 1426513294 -19800
# Node ID b74b3d65a1092102e92cf7f9383b48c825abad3c
# Parent  6b657e598445ed64e3cbe182008d42725556b858
asm-intra_pred_ang16_32: add AVX2 primitive, ~45% improvement over SSE4

AVX2:
intra_ang_16x16[32]     14.67x   565.54          8297.95

SSE4:
intra_ang_16x16[32]     7.92x    1041.01         8241.45

diff -r 6b657e598445 -r b74b3d65a109 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Mon Mar 16 18:57:08 2015 +0530
+++ b/source/common/x86/asm-primitives.cpp	Mon Mar 16 19:11:34 2015 +0530
@@ -1514,6 +1514,7 @@
         p.cu[BLOCK_16x16].intra_pred[29] = x265_intra_pred_ang16_29_avx2;
         p.cu[BLOCK_16x16].intra_pred[30] = x265_intra_pred_ang16_30_avx2;
         p.cu[BLOCK_16x16].intra_pred[31] = x265_intra_pred_ang16_31_avx2;
+        p.cu[BLOCK_16x16].intra_pred[32] = x265_intra_pred_ang16_32_avx2;
 
         // copy_sp primitives
         p.cu[BLOCK_16x16].copy_sp = x265_blockcopy_sp_16x16_avx2;
diff -r 6b657e598445 -r b74b3d65a109 source/common/x86/intrapred.h
--- a/source/common/x86/intrapred.h	Mon Mar 16 18:57:08 2015 +0530
+++ b/source/common/x86/intrapred.h	Mon Mar 16 19:11:34 2015 +0530
@@ -189,6 +189,7 @@
 void x265_intra_pred_ang16_29_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
 void x265_intra_pred_ang16_30_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
 void x265_intra_pred_ang16_31_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
+void x265_intra_pred_ang16_32_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
 void x265_all_angs_pred_4x4_sse4(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma);
 void x265_all_angs_pred_8x8_sse4(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma);
 void x265_all_angs_pred_16x16_sse4(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma);
diff -r 6b657e598445 -r b74b3d65a109 source/common/x86/intrapred8.asm
--- a/source/common/x86/intrapred8.asm	Mon Mar 16 18:57:08 2015 +0530
+++ b/source/common/x86/intrapred8.asm	Mon Mar 16 19:11:34 2015 +0530
@@ -186,6 +186,19 @@
                       db 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31
                       db 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16
 
+ALIGN 32
+c_ang16_mode_32:      db 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21
+                      db 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31
+                      db 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20
+                      db 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30
+                      db 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19
+                      db 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29
+                      db 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18
+                      db 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28
+                      db 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17
+                      db 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27
+                      db 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16
+
 ALIGN 32
 ;; (blkSize - 1 - x)
 pw_planar4_0:         dw 3,  2,  1,  0,  3,  2,  1,  0
@@ -11093,3 +11106,124 @@
     vpermq            m3, m3, 11011000b
     movu              [r0 + r3], xm3
     RET
+
+INIT_YMM avx2
+cglobal intra_pred_ang16_32, 3, 5, 6
+    mova              m0, [pw_1024]
+    mova              m5, [intra_pred_shuff_0_8]
+    lea               r3, [3 * r1]
+    lea               r4, [c_ang16_mode_32]
+
+    vbroadcasti128    m1, [r2 + 1]
+    pshufb            m1, m5
+    vbroadcasti128    m2, [r2 + 9]
+    pshufb            m2, m5
+
+    vperm2i128        m1, m1, m2, 00100000b
+    pmaddubsw         m3, m1, [r4 + 0 * mmsize]
+    pmulhrsw          m3, m0
+    packuswb          m3, m3
+    vpermq            m3, m3, 11011000b
+    movu              [r0], xm3
+
+    vbroadcasti128    m1, [r2 + 2]
+    pshufb            m1, m5
+    vbroadcasti128    m2, [r2 + 10]
+    pshufb            m2, m5
+
+    INTRA_PRED_ANG16_MC0 r0 + r1, r0 + 2 * r1, 1
+
+    vbroadcasti128    m1, [r2 + 3]
+    pshufb            m1, m5
+    vbroadcasti128    m2, [r2 + 11]
+    pshufb            m2, m5
+
+    vperm2i128        m1, m1, m2, 00100000b
+    pmaddubsw         m3, m1, [r4 + 2 * mmsize]
+    pmulhrsw          m3, m0
+    packuswb          m3, m3
+    vpermq            m3, m3, 11011000b
+    movu              [r0 + r3], xm3
+
+    vbroadcasti128    m1, [r2 + 4]
+    pshufb            m1, m5
+    vbroadcasti128    m2, [r2 + 12]
+    pshufb            m2, m5
+
+    lea              r0, [r0 + 4 * r1]
+    INTRA_PRED_ANG16_MC0 r0, r0 + r1, 3
+
+    vbroadcasti128    m1, [r2 + 5]
+    pshufb            m1, m5
+    vbroadcasti128    m2, [r2 + 13]
+    pshufb            m2, m5
+
+    add               r4, 4 * mmsize
+    vperm2i128        m1, m1, m2, 00100000b
+    pmaddubsw         m3, m1, [r4 + 0 * mmsize]
+    pmulhrsw          m3, m0
+    packuswb          m3, m3
+    vpermq            m3, m3, 11011000b
+    movu              [r0 + 2 * r1], xm3
+
+    vbroadcasti128    m1, [r2 + 6]
+    pshufb            m1, m5
+    vbroadcasti128    m2, [r2 + 14]
+    pshufb            m2, m5
+
+    INTRA_PRED_ANG16_MC0 r0 + r3, r0 + 4 * r1, 1
+
+    vbroadcasti128    m1, [r2 + 7]
+    pshufb            m1, m5
+    vbroadcasti128    m2, [r2 + 15]
+    pshufb            m2, m5
+
+    lea               r0, [r0 + 4 * r1]
+    vperm2i128        m1, m1, m2, 00100000b
+    pmaddubsw         m3, m1, [r4 + 2 * mmsize]
+    pmulhrsw          m3, m0
+    packuswb          m3, m3
+    vpermq            m3, m3, 11011000b
+    movu              [r0 + r1], xm3
+
+    vbroadcasti128    m1, [r2 + 8]
+    pshufb            m1, m5
+    vbroadcasti128    m2, [r2 + 16]
+    pshufb            m2, m5
+
+    INTRA_PRED_ANG16_MC0 r0 + 2 * r1, r0 + r3, 3
+
+    vbroadcasti128    m1, [r2 + 9]
+    pshufb            m1, m5
+    vbroadcasti128    m2, [r2 + 17]
+    pshufb            m2, m5
+
+    lea               r0, [r0 + 4 * r1]
+    add               r4, 4 * mmsize
+
+    vperm2i128        m1, m1, m2, 00100000b
+    pmaddubsw         m3, m1, [r4 + 0 * mmsize]
+    pmulhrsw          m3, m0
+    packuswb          m3, m3
+    vpermq            m3, m3, 11011000b
+    movu              [r0], xm3
+
+    vbroadcasti128    m1, [r2 + 10]
+    pshufb            m1, m5
+    vbroadcasti128    m2, [r2 + 18]
+    pshufb            m2, m5
+
+    INTRA_PRED_ANG16_MC0 r0 + r1, r0 + 2 * r1, 1
+
+    vbroadcasti128    m1, [r2 + 11]
+    pshufb            m1, m5
+    vbroadcasti128    m2, [r2 + 19]
+    pshufb            m2, m5
+
+    vperm2i128        m1, m1, m2, 00100000b
+    pmaddubsw         m3, m1, [r4 + 2 * mmsize]
+    pmulhrsw          m3, m0
+    packuswb          m3, m3
+    vpermq            m3, m3, 11011000b
+    movu              [r0 + r3], xm3
+    RET


More information about the x265-devel mailing list