[x265] [PATCH 2 of 6] intra_pred_ang32_2: improve performance by ~48% over SSSE3

praveen at multicorewareinc.com
Thu Mar 19 06:03:04 CET 2015


# HG changeset patch
# User Praveen Tiwari <praveen at multicorewareinc.com>
# Date 1426670751 -19800
# Node ID 1a4b131bfbb682dbb52e0f9af3e4522f1f172c8a
# Parent  8f913bc33c441db473857076efb7086f8966b20a
intra_pred_ang32_2: improve performance by ~48% over SSSE3

AVX2 (testbench columns: speedup over C, optimized avg cycles, C avg cycles):
intra_ang_32x32[ 2]     69.22x   647.57          44826.26

SSSE3:
intra_ang_32x32[ 2]     34.74x   1257.43         43678.47
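
For reference, angular mode 2 is a pure diagonal copy of the reference
pixels, so the AVX2 kernel below reduces to shifted copies of one reference
row, generated with palignr from two overlapping 32-byte loads. A rough C
sketch of the behaviour, with the offsets read off the [r2 + 64 + 2] loads
in the patch (the helper name and trimmed signature are illustrative only,
not x265's C primitive):

    #include <stdint.h>

    /* Sketch only: row y of the 32x32 block is the 32 bytes starting at
     * srcPix[64 + 2 + y], i.e. each row shifts the reference window by one. */
    static void ang32_2_ref(uint8_t* dst, intptr_t dstStride,
                            const uint8_t* srcPix)
    {
        for (int y = 0; y < 32; y++)
            for (int x = 0; x < 32; x++)
                dst[y * dstStride + x] = srcPix[64 + 2 + x + y];
    }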

diff -r 8f913bc33c44 -r 1a4b131bfbb6 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Wed Mar 18 14:28:32 2015 +0530
+++ b/source/common/x86/asm-primitives.cpp	Wed Mar 18 14:55:51 2015 +0530
@@ -1531,6 +1531,7 @@
         p.cu[BLOCK_16x16].intra_pred[23] = x265_intra_pred_ang16_23_avx2;
         p.cu[BLOCK_16x16].intra_pred[22] = x265_intra_pred_ang16_22_avx2;
         p.cu[BLOCK_32x32].intra_pred[34] = x265_intra_pred_ang32_34_avx2;
+        p.cu[BLOCK_32x32].intra_pred[2] = x265_intra_pred_ang32_2_avx2;
 
         // copy_sp primitives
         p.cu[BLOCK_16x16].copy_sp = x265_blockcopy_sp_16x16_avx2;
diff -r 8f913bc33c44 -r 1a4b131bfbb6 source/common/x86/intrapred.h
--- a/source/common/x86/intrapred.h	Wed Mar 18 14:28:32 2015 +0530
+++ b/source/common/x86/intrapred.h	Wed Mar 18 14:55:51 2015 +0530
@@ -195,6 +195,7 @@
 void x265_intra_pred_ang16_23_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
 void x265_intra_pred_ang16_22_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
 void x265_intra_pred_ang32_34_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
+void x265_intra_pred_ang32_2_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
 void x265_all_angs_pred_4x4_sse4(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma);
 void x265_all_angs_pred_8x8_sse4(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma);
 void x265_all_angs_pred_16x16_sse4(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma);
diff -r 8f913bc33c44 -r 1a4b131bfbb6 source/common/x86/intrapred8.asm
--- a/source/common/x86/intrapred8.asm	Wed Mar 18 14:28:32 2015 +0530
+++ b/source/common/x86/intrapred8.asm	Wed Mar 18 14:55:51 2015 +0530
@@ -11393,3 +11393,95 @@
     palignr m2, m1, m0, 14
     movu    [r0 + r3], m2
     RET
+
+INIT_YMM avx2
+cglobal intra_pred_ang32_2, 3, 5, 3
+    lea     r3, [3 * r1]            ; r3 = 3 * dstStride
+
+    movu    m0, [r2 + 64 + 2]       ; 32 reference pixels for row 0
+    movu    m1, [r2 + 64 + 18]      ; same window 16 bytes ahead, palignr high source
+
+    movu    [r0], m0
+
+    palignr m2, m1, m0, 1
+    movu    [r0 + r1], m2
+    palignr m2, m1, m0, 2
+    movu    [r0 + 2 * r1], m2
+    palignr m2, m1, m0, 3
+    movu    [r0 + r3], m2
+
+    lea     r0, [r0 + 4 * r1]
+    palignr m2, m1, m0, 4
+    movu    [r0], m2
+    palignr m2, m1, m0, 5
+    movu    [r0 + r1], m2
+    palignr m2, m1, m0, 6
+    movu    [r0 + 2 * r1], m2
+    palignr m2, m1, m0, 7
+    movu    [r0 + r3], m2
+
+    lea     r0, [r0 + 4 * r1]
+    palignr m2, m1, m0, 8
+    movu    [r0], m2
+    palignr m2, m1, m0, 9
+    movu    [r0 + r1], m2
+    palignr m2, m1, m0, 10
+    movu    [r0 + 2 * r1], m2
+    palignr m2, m1, m0, 11
+    movu    [r0 + r3], m2
+
+    lea     r0, [r0 + 4 * r1]
+    palignr m2, m1, m0, 12
+    movu    [r0], m2
+    palignr m2, m1, m0, 13
+    movu    [r0 + r1], m2
+    palignr m2, m1, m0, 14
+    movu    [r0 + 2 * r1], m2
+    palignr m2, m1, m0, 15
+    movu    [r0 + r3], m2
+
+    lea     r0, [r0 + 4 * r1]
+
+    palignr m2, m1, m0, 16
+    movu    [r0], m2
+
+    movu    m0, [r2 + 64 + 19]     ; reference pixels for row 17
+    movu    [r0 + r1], m0
+
+    movu    m1, [r2 + 64 + 35]     ; next window, 16 bytes ahead
+
+    palignr m2, m1, m0, 1
+    movu    [r0 + 2 * r1], m2
+    palignr m2, m1, m0, 2
+    movu    [r0 + r3], m2
+
+    lea     r0, [r0 + 4 * r1]
+    palignr m2, m1, m0, 3
+    movu    [r0], m2
+    palignr m2, m1, m0, 4
+    movu    [r0 + r1], m2
+    palignr m2, m1, m0, 5
+    movu    [r0 + 2 * r1], m2
+    palignr m2, m1, m0, 6
+    movu    [r0 + r3], m2
+
+    lea     r0, [r0 + 4 * r1]
+    palignr m2, m1, m0, 7
+    movu    [r0], m2
+    palignr m2, m1, m0, 8
+    movu    [r0 + r1], m2
+    palignr m2, m1, m0, 9
+    movu    [r0 + 2 * r1], m2
+    palignr m2, m1, m0, 10
+    movu    [r0 + r3], m2
+
+    lea     r0, [r0 + 4 * r1]
+    palignr m2, m1, m0, 11
+    movu    [r0], m2
+    palignr m2, m1, m0, 12
+    movu    [r0 + r1], m2
+    palignr m2, m1, m0, 13
+    movu    [r0 + 2 * r1], m2
+    palignr m2, m1, m0, 14
+    movu    [r0 + r3], m2
+    RET

