[x265] [PATCH 3 of 4] intra_pred_ang16_22: improve speedup by ~19% over SSE4

praveen at multicorewareinc.com
Wed Mar 18 05:48:31 CET 2015


# HG changeset patch
# User Praveen Tiwari <praveen at multicorewareinc.com>
# Date 1426595733 -19800
# Node ID eeee135bfe2d81e275304ffed46885955015c6bf
# Parent  8e96973847fdcb5140a67ae3a67e66dfb87b5040
intra_pred_ang16_22: improve speedup by ~19% over SSE4

AVX2:
intra_ang_16x16[22]     14.40x   718.79          10351.97

SSE4:
intra_ang_16x16[22]     12.30x   888.03          10926.41
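
(For context on the test-bench output above: the middle column appears to be the measured cycle count of the optimized primitive and the last column that of the C reference, which is consistent with the speedup factors shown: 10351.97 / 718.79 ≈ 14.40x and 10926.41 / 888.03 ≈ 12.30x. The ~19% in the subject is the cycle reduction of the AVX2 kernel relative to the SSE4 one: (888.03 - 718.79) / 888.03 ≈ 19%.)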

diff -r 8e96973847fd -r eeee135bfe2d source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Tue Mar 17 16:22:58 2015 +0530
+++ b/source/common/x86/asm-primitives.cpp	Tue Mar 17 18:05:33 2015 +0530
@@ -1529,6 +1529,7 @@
         p.cu[BLOCK_16x16].intra_pred[33] = x265_intra_pred_ang16_33_avx2;
         p.cu[BLOCK_16x16].intra_pred[24] = x265_intra_pred_ang16_24_avx2;
         p.cu[BLOCK_16x16].intra_pred[23] = x265_intra_pred_ang16_23_avx2;
+        p.cu[BLOCK_16x16].intra_pred[22] = x265_intra_pred_ang16_22_avx2;
 
         // copy_sp primitives
         p.cu[BLOCK_16x16].copy_sp = x265_blockcopy_sp_16x16_avx2;
diff -r 8e96973847fd -r eeee135bfe2d source/common/x86/intrapred.h
--- a/source/common/x86/intrapred.h	Tue Mar 17 16:22:58 2015 +0530
+++ b/source/common/x86/intrapred.h	Tue Mar 17 18:05:33 2015 +0530
@@ -193,6 +193,7 @@
 void x265_intra_pred_ang16_33_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
 void x265_intra_pred_ang16_24_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
 void x265_intra_pred_ang16_23_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
+void x265_intra_pred_ang16_22_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
 void x265_all_angs_pred_4x4_sse4(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma);
 void x265_all_angs_pred_8x8_sse4(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma);
 void x265_all_angs_pred_16x16_sse4(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma);
diff -r 8e96973847fd -r eeee135bfe2d source/common/x86/intrapred8.asm
--- a/source/common/x86/intrapred8.asm	Tue Mar 17 16:22:58 2015 +0530
+++ b/source/common/x86/intrapred8.asm	Tue Mar 17 18:05:33 2015 +0530
@@ -236,6 +236,17 @@
                      db 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2
                      db 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16
 
+ALIGN 32
+c_ang16_mode_22:     db 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6, 26, 6
+                     db 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12
+                     db 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18
+                     db 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5
+                     db 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11, 21, 11
+                     db 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17
+                     db 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4
+                     db 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10
+                     db 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16
+
 ALIGN 32
 ;; (blkSize - 1 - x)
 pw_planar4_0:         dw 3,  2,  1,  0,  3,  2,  1,  0
@@ -11250,3 +11261,72 @@
     pshufb            m2, m5
     INTRA_PRED_ANG16_MC0 r0 + 2 * r1, r0 + r3, 0
     RET
+
+INIT_YMM avx2
+cglobal intra_pred_ang16_22, 3, 5, 7
+    mova              m0, [pw_1024]
+    mova              m5, [intra_pred_shuff_0_8]
+    lea               r3, [3 * r1]
+    lea               r4, [c_ang16_mode_22]
+
+    INTRA_PRED_ANG16_MC2 0
+    INTRA_PRED_ANG16_MC0 r0, r0 + r1, 0
+
+    movu              xm6, [r2 - 1]
+    pinsrb            xm6, [r2 + 34], 0
+    vinserti128       m1, m6, xm6, 1
+    pshufb            m1, m5
+    vbroadcasti128    m2, [r2 + 7]
+    pshufb            m2, m5
+    INTRA_PRED_ANG16_MC0 r0 + 2 * r1, r0 + r3, 1
+
+    lea               r0, [r0 + 4 * r1]
+
+    pslldq            xm6, xm6, 1
+    pinsrb            xm6, [r2 + 37], 0
+    vinserti128       m1, m6, xm6, 1
+    pshufb            m1, m5
+    vbroadcasti128    m2, [r2 + 6]
+    pshufb            m2, m5
+    INTRA_PRED_ANG16_MC0 r0, r0 + r1, 2
+    INTRA_PRED_ANG16_MC3 r0 + 2 * r1, 3
+
+    add               r4, 4 * mmsize
+
+    pslldq            xm6, xm6, 1
+    pinsrb            xm6, [r2 + 39], 0
+    vinserti128       m1, m6, xm6, 1
+    pshufb            m1, m5
+    vbroadcasti128    m2, [r2 + 5]
+    pshufb            m2, m5
+    INTRA_PRED_ANG16_MC0 r0 + r3, r0 + 4 * r1, 0
+
+    lea               r0, [r0 + 4 * r1]
+
+    pslldq            xm6, xm6, 1
+    pinsrb            xm6, [r2 + 42], 0
+    vinserti128       m1, m6, xm6, 1
+    pshufb            m1, m5
+    vbroadcasti128    m2, [r2 + 4]
+    pshufb            m2, m5
+    INTRA_PRED_ANG16_MC0 r0 + r1, r0 + 2 * r1, 1
+    INTRA_PRED_ANG16_MC3 r0 + r3, 2
+
+    lea               r0, [r0 + 4 * r1]
+
+    pslldq            xm6, xm6, 1
+    pinsrb            xm6, [r2 + 44], 0
+    vinserti128       m1, m6, xm6, 1
+    pshufb            m1, m5
+    vbroadcasti128    m2, [r2 + 3]
+    pshufb            m2, m5
+    INTRA_PRED_ANG16_MC0 r0, r0 + r1, 3
+
+    pslldq            xm6, xm6, 1
+    pinsrb            xm6, [r2 + 47], 0
+    vinserti128       m1, m6, xm6, 1
+    pshufb            m1, m5
+    vbroadcasti128    m2, [r2 + 2]
+    pshufb            m2, m5
+    INTRA_PRED_ANG16_MC0 r0 + 2 * r1, r0 + r3, 4
+    RET
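
For readers unfamiliar with these coefficient tables: each byte pair in
c_ang16_mode_22 is (32 - fract, fract) for one prediction row, i.e. the two
weights of HEVC's two-tap angular interpolation. The pairs are laid out so
adjacent reference bytes can be multiply-added against them, with the final
(+16) >> 5 rounding done via the pw_1024 constant loaded into m0. A rough
scalar sketch of what one row computes (illustrative only; angularRow and
the linear 'ref' buffer here are hypothetical and not the exact reference
layout the asm builds with pinsrb/pslldq):

    #include <cstdint>

    // Two-tap angular interpolation for one row of the block.
    // For mode 22, row 1 uses fract = 19, matching the (13, 19) pair above.
    static void angularRow(uint8_t* dst, const uint8_t* ref, int fract, int width)
    {
        for (int x = 0; x < width; x++)
            dst[x] = (uint8_t)(((32 - fract) * ref[x] + fract * ref[x + 1] + 16) >> 5);
    }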