[x265] [PATCH 1 of 6] asm-intra_pred_ang32_34: improve performance by ~53% over SSSE3

praveen at multicorewareinc.com praveen at multicorewareinc.com
Thu Mar 19 06:03:03 CET 2015


# HG changeset patch
# User Praveen Tiwari <praveen at multicorewareinc.com>
# Date 1426669112 -19800
# Node ID 8f913bc33c441db473857076efb7086f8966b20a
# Parent  cbfa66e0b50cc2393ccbcf6471406504c6c06011
asm-intra_pred_ang32_34: improve performance by ~53% over SSSE3

AVX2:
intra_ang_32x32[34]     33.09x   550.32          18212.47

SSSE3:
intra_ang_32x32[34]     16.00x   1188.19         19011.38
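
(In this testbench output the columns read as: speedup over the C primitive, average cycles for the measured kernel, and average cycles for the C reference; 19011.38 / 1188.19 gives the 16.00x and 18212.47 / 550.32 gives the 33.09x. The ~53% claim follows from comparing the two kernel columns: 1 - 550.32 / 1188.19 is roughly 0.537.)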

diff -r cbfa66e0b50c -r 8f913bc33c44 source/common/x86/asm-primitives.cpp
--- a/source/common/x86/asm-primitives.cpp	Wed Mar 18 18:16:51 2015 -0500
+++ b/source/common/x86/asm-primitives.cpp	Wed Mar 18 14:28:32 2015 +0530
@@ -1530,6 +1530,7 @@
         p.cu[BLOCK_16x16].intra_pred[24] = x265_intra_pred_ang16_24_avx2;
         p.cu[BLOCK_16x16].intra_pred[23] = x265_intra_pred_ang16_23_avx2;
         p.cu[BLOCK_16x16].intra_pred[22] = x265_intra_pred_ang16_22_avx2;
+        p.cu[BLOCK_32x32].intra_pred[34] = x265_intra_pred_ang32_34_avx2;
 
         // copy_sp primitives
         p.cu[BLOCK_16x16].copy_sp = x265_blockcopy_sp_16x16_avx2;
diff -r cbfa66e0b50c -r 8f913bc33c44 source/common/x86/intrapred.h
--- a/source/common/x86/intrapred.h	Wed Mar 18 18:16:51 2015 -0500
+++ b/source/common/x86/intrapred.h	Wed Mar 18 14:28:32 2015 +0530
@@ -4,7 +4,7 @@
  * Copyright (C) 2003-2013 x264 project
  *
  * Authors: Min Chen <chenm003 at 163.com> <min.chen at multicorewareinc.com>
- *
+ *          Praveen Kumar Tiwari <praveen at multicorewareinc.com>
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
@@ -194,6 +194,7 @@
 void x265_intra_pred_ang16_24_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
 void x265_intra_pred_ang16_23_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
 void x265_intra_pred_ang16_22_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
+void x265_intra_pred_ang32_34_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
 void x265_all_angs_pred_4x4_sse4(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma);
 void x265_all_angs_pred_8x8_sse4(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma);
 void x265_all_angs_pred_16x16_sse4(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma);
diff -r cbfa66e0b50c -r 8f913bc33c44 source/common/x86/intrapred8.asm
--- a/source/common/x86/intrapred8.asm	Wed Mar 18 18:16:51 2015 -0500
+++ b/source/common/x86/intrapred8.asm	Wed Mar 18 14:28:32 2015 +0530
@@ -11301,3 +11301,95 @@
     INTRA_PRED_ANG16_MC5 47, 2
     INTRA_PRED_ANG16_MC0 r0 + 2 * r1, r0 + r3, 4
     RET
+
+INIT_YMM avx2
+cglobal intra_pred_ang32_34, 3, 5, 3
+    lea     r3, [3 * r1]
+
+    movu    m0, [r2 + 2]
+    movu    m1, [r2 + 18]
+
+    movu    [r0], m0
+
+    palignr m2, m1, m0, 1
+    movu    [r0 + r1], m2
+    palignr m2, m1, m0, 2
+    movu    [r0 + 2 * r1], m2
+    palignr m2, m1, m0, 3
+    movu    [r0 + r3], m2
+
+    lea     r0, [r0 + 4 * r1]
+    palignr m2, m1, m0, 4
+    movu    [r0], m2
+    palignr m2, m1, m0, 5
+    movu    [r0 + r1], m2
+    palignr m2, m1, m0, 6
+    movu    [r0 + 2 * r1], m2
+    palignr m2, m1, m0, 7
+    movu    [r0 + r3], m2
+
+    lea     r0, [r0 + 4 * r1]
+    palignr m2, m1, m0, 8
+    movu    [r0], m2
+    palignr m2, m1, m0, 9
+    movu    [r0 + r1], m2
+    palignr m2, m1, m0, 10
+    movu    [r0 + 2 * r1], m2
+    palignr m2, m1, m0, 11
+    movu    [r0 + r3], m2
+
+    lea     r0, [r0 + 4 * r1]
+    palignr m2, m1, m0, 12
+    movu    [r0], m2
+    palignr m2, m1, m0, 13
+    movu    [r0 + r1], m2
+    palignr m2, m1, m0, 14
+    movu    [r0 + 2 * r1], m2
+    palignr m2, m1, m0, 15
+    movu    [r0 + r3], m2
+
+    lea     r0, [r0 + 4 * r1]
+
+    palignr m2, m1, m0, 16
+    movu    [r0], m2
+
+    movu    m0, [r2 + 19]
+    movu    [r0 + r1], m0
+
+    movu    m1, [r2 + 35]
+
+    palignr m2, m1, m0, 1
+    movu    [r0 + 2 * r1], m2
+    palignr m2, m1, m0, 2
+    movu    [r0 + r3], m2
+
+    lea     r0, [r0 + 4 * r1]
+    palignr m2, m1, m0, 3
+    movu    [r0], m2
+    palignr m2, m1, m0, 4
+    movu    [r0 + r1], m2
+    palignr m2, m1, m0, 5
+    movu    [r0 + 2 * r1], m2
+    palignr m2, m1, m0, 6
+    movu    [r0 + r3], m2
+
+    lea     r0, [r0 + 4 * r1]
+    palignr m2, m1, m0, 7
+    movu    [r0], m2
+    palignr m2, m1, m0, 8
+    movu    [r0 + r1], m2
+    palignr m2, m1, m0, 9
+    movu    [r0 + 2 * r1], m2
+    palignr m2, m1, m0, 10
+    movu    [r0 + r3], m2
+
+    lea     r0, [r0 + 4 * r1]
+    palignr m2, m1, m0, 11
+    movu    [r0], m2
+    palignr m2, m1, m0, 12
+    movu    [r0 + r1], m2
+    palignr m2, m1, m0, 13
+    movu    [r0 + 2 * r1], m2
+    palignr m2, m1, m0, 14
+    movu    [r0 + r3], m2
+    RET
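
For readers less used to per-lane VPALIGNR: the second load is placed exactly 16 bytes past the first, so the per-128-bit-lane alignr on the pair stitches back into one contiguous 32-byte window shifted by the immediate, which is the next row of the mode-34 (pure diagonal) projection. A rough C intrinsics sketch of the first few rows (illustrative only, not the committed code; the function name is made up):

    #include <immintrin.h>
    #include <stdint.h>
    #include <stddef.h>

    /* Sketch of intra_pred_ang32_34: first three rows only. The asm above
     * unrolls the same pattern for shifts 1..15, then reloads at srcPix + 19
     * and srcPix + 35 and repeats it for the lower 16 rows. */
    static void ang32_34_sketch(uint8_t* dst, ptrdiff_t dstStride, const uint8_t* srcPix)
    {
        __m256i lo = _mm256_loadu_si256((const __m256i*)(srcPix + 2));   /* bytes 2..33  */
        __m256i hi = _mm256_loadu_si256((const __m256i*)(srcPix + 18));  /* bytes 18..49 */

        /* row 0: reference bytes 2..33, stored unchanged */
        _mm256_storeu_si256((__m256i*)dst, lo);

        /* row 1: per-lane alignr by 1. Lane 0 yields bytes 3..18, lane 1 yields
         * bytes 19..34; because hi overlaps lo by exactly 16 bytes the two lanes
         * join into the contiguous window 3..34. */
        _mm256_storeu_si256((__m256i*)(dst + dstStride), _mm256_alignr_epi8(hi, lo, 1));

        /* row 2: bytes 4..35, and so on up to shift 15 */
        _mm256_storeu_si256((__m256i*)(dst + 2 * dstStride), _mm256_alignr_epi8(hi, lo, 2));
    }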

