[x265-commits] [x265] pixelharness: only test partition sizes plausibly used by the encoder

Steve Borho steve at borho.org
Sun Oct 6 02:21:11 CEST 2013


details:   http://hg.videolan.org/x265/rev/699b843073de
branches:  
changeset: 4219:699b843073de
user:      Steve Borho <steve at borho.org>
date:      Sat Oct 05 16:07:55 2013 -0500
description:
pixelharness: only test partition sizes plausibly used by the encoder
Subject: [x265] pixel: only compile partition sizes that are used by the encoder

details:   http://hg.videolan.org/x265/rev/e5369adbccba
branches:  
changeset: 4220:e5369adbccba
user:      Steve Borho <steve at borho.org>
date:      Sat Oct 05 18:09:39 2013 -0500
description:
pixel: only compile partition sizes that are used by the encoder
Subject: [x265] pixel: eliminate width 12 SAD code for height != 16

details:   http://hg.videolan.org/x265/rev/bbc040a8109c
branches:  
changeset: 4221:bbc040a8109c
user:      Steve Borho <steve at borho.org>
date:      Sat Oct 05 18:17:00 2013 -0500
description:
pixel: eliminate width 12 SAD code for height != 16
Subject: [x265] pixel: eliminate width 24 SAD code for height != 32

details:   http://hg.videolan.org/x265/rev/fb475b36852c
branches:  
changeset: 4222:fb475b36852c
user:      Steve Borho <steve at borho.org>
date:      Sat Oct 05 18:21:36 2013 -0500
description:
pixel: eliminate width 24 SAD code for height != 32
Subject: [x265] pixel: remove sad_*_4<> intrinsic functions; they are covered by assembly

details:   http://hg.videolan.org/x265/rev/1ae7953bceb4
branches:  
changeset: 4223:1ae7953bceb4
user:      Steve Borho <steve at borho.org>
date:      Sat Oct 05 18:52:12 2013 -0500
description:
pixel: remove sad_*_4<> intrinsic functions; they are covered by assembly

x264 assembly code covers 4x4, 4x8, and 4x16, and those are the only 4-width
partitions used by x265.
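
For reference, the work the removed sad_*_4<> templates performed is a plain sum of
absolute differences over a 4-pixel-wide block. A minimal scalar sketch of that
computation (illustrative only, not part of this patch; it assumes pixel is uint8_t at
8-bit depth, and the function name is ours):

    #include <cstdint>
    #include <cstdlib>   // abs

    typedef uint8_t pixel;   // x265 pixel type at 8-bit depth

    // Scalar reference for a width-4 SAD.  The removed templates computed the
    // same result with MMX/SSE intrinsics; the x264 assembly that remains
    // covers the only heights the encoder uses at width 4: 4, 8, and 16.
    template<int ly>
    int sad_4_ref(const pixel *fenc, intptr_t fencstride,
                  const pixel *fref, intptr_t frefstride)
    {
        int sum = 0;
        for (int y = 0; y < ly; y++)
        {
            for (int x = 0; x < 4; x++)
                sum += abs(fenc[x] - fref[x]);
            fenc += fencstride;
            fref += frefstride;
        }
        return sum;
    }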
Subject: [x265] pixel: simplify sad_64 primitives to a single loop

details:   http://hg.videolan.org/x265/rev/87b5a379a1d8
branches:  
changeset: 4224:87b5a379a1d8
user:      Steve Borho <steve at borho.org>
date:      Sat Oct 05 19:02:27 2013 -0500
description:
pixel: simplify sad_64 primitives to a single loop

64-width blocks will only be height 16, 32, 48, or 64. Having an 8-row loop
is just fine.
Subject: [x265] pixel: simplify sad_32 primitives to a single loop

details:   http://hg.videolan.org/x265/rev/affee51d4f86
branches:  
changeset: 4225:affee51d4f86
user:      Steve Borho <steve at borho.org>
date:      Sat Oct 05 19:15:06 2013 -0500
description:
pixel: simplify sad_32 primitives to a single loop

32-width blocks will only be height 8, 16, 24, or 32. Having an 8-row loop
is just fine.
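
Both this commit and the previous one converge on the same shape: every height the
encoder pairs with these widths is a multiple of 8 (8/16/24/32 at width 32, 16/32/48/64
at width 64), so a single loop that advances 8 rows per iteration replaces the former
separate ly % 16 / ly % 8 / ly % 4 code paths. A minimal scalar sketch of that shape
(illustrative only; the real primitives in pixel-sse41.cpp use SSE intrinsics, and
pixel/intptr_t are as in the earlier sketch):

    // Single-loop SAD for wide blocks; WIDTH would be 32 or 64.  The 8-row
    // step never needs a remainder loop because ly is always a multiple of 8.
    template<int WIDTH, int ly>
    int sad_wide_sketch(const pixel *fenc, intptr_t fencstride,
                        const pixel *fref, intptr_t frefstride)
    {
        int sum = 0;
        for (int i = 0; i < ly; i += 8)          // the single 8-row loop
            for (int row = 0; row < 8; row++)
                for (int x = 0; x < WIDTH; x++)
                    sum += abs(fenc[(i + row) * fencstride + x] -
                               fref[(i + row) * frefstride + x]);
        return sum;
    }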
Subject: [x265] pixel: eliminate width 48 SAD code for height != 64

details:   http://hg.videolan.org/x265/rev/8f7091d09c11
branches:  
changeset: 4226:8f7091d09c11
user:      Steve Borho <steve at borho.org>
date:      Sat Oct 05 19:20:28 2013 -0500
description:
pixel: eliminate width 48 SAD code for height != 64

diffstat:

 source/common/vec/pixel-avx2.cpp  |     31 +-
 source/common/vec/pixel-sse3.cpp  |     24 +-
 source/common/vec/pixel-sse41.cpp |  22261 ++++-------------------------------
 source/common/vec/pixel.inc       |     86 -
 source/test/pixelharness.cpp      |    284 +-
 source/test/pixelharness.h        |      2 +
 6 files changed, 3124 insertions(+), 19564 deletions(-)

diffs (truncated from 23543 to 300 lines):

diff -r 49231db18e60 -r 8f7091d09c11 source/common/vec/pixel-avx2.cpp
--- a/source/common/vec/pixel-avx2.cpp	Sat Oct 05 16:25:21 2013 -0500
+++ b/source/common/vec/pixel-avx2.cpp	Sat Oct 05 19:20:28 2013 -0500
@@ -28,7 +28,6 @@
 #define INSTRSET 8
 #include "vectorclass.h"
 
-#define ARCH avx2
 using namespace x265;
 
 namespace {
@@ -445,28 +444,26 @@ void sad_avx2_x4_64(pixel *fenc, pixel *
 #endif
 }
 
-#define SET_FUNC_PRIMITIVE_TABLE_WIDTH(WIDTH, FUNC_PREFIX, FUNC_PREFIX_DEF, FUNC_TYPE_CAST) \
-    p.FUNC_PREFIX[PARTITION_ ## WIDTH ## x4] = (FUNC_TYPE_CAST)FUNC_PREFIX_DEF ## WIDTH<4>;  \
-    p.FUNC_PREFIX[PARTITION_ ## WIDTH ## x8] = (FUNC_TYPE_CAST)FUNC_PREFIX_DEF ## WIDTH<8>;  \
-    p.FUNC_PREFIX[PARTITION_ ## WIDTH ## x12] = (FUNC_TYPE_CAST)FUNC_PREFIX_DEF ## WIDTH<12>; \
-    p.FUNC_PREFIX[PARTITION_ ## WIDTH ## x16] = (FUNC_TYPE_CAST)FUNC_PREFIX_DEF ## WIDTH<16>; \
-    p.FUNC_PREFIX[PARTITION_ ## WIDTH ## x24] = (FUNC_TYPE_CAST)FUNC_PREFIX_DEF ## WIDTH<24>; \
-    p.FUNC_PREFIX[PARTITION_ ## WIDTH ## x32] = (FUNC_TYPE_CAST)FUNC_PREFIX_DEF ## WIDTH<32>; \
-    p.FUNC_PREFIX[PARTITION_ ## WIDTH ## x48] = (FUNC_TYPE_CAST)FUNC_PREFIX_DEF ## WIDTH<48>; \
-    p.FUNC_PREFIX[PARTITION_ ## WIDTH ## x64] = (FUNC_TYPE_CAST)FUNC_PREFIX_DEF ## WIDTH<64>;
-
 namespace x265 {
 void Setup_Vec_PixelPrimitives_avx2(EncoderPrimitives &p)
 {
     p.sad[0] = p.sad[0];
+#define SET_SADS(W, H) \
+    p.sad[PARTITION_##W##x##H] = sad_avx2_##W<H>; \
+    p.sad_x3[PARTITION_##W##x##H] = sad_avx2_x3_##W<H>; \
+    p.sad_x4[PARTITION_##W##x##H] = sad_avx2_x4_##W<H>; \
+
 #if !HIGH_BIT_DEPTH 
 #if (defined(__GNUC__) || defined(__INTEL_COMPILER))
-    SET_FUNC_PRIMITIVE_TABLE_WIDTH(32, sad, sad_avx2_, pixelcmp_t)
-    SET_FUNC_PRIMITIVE_TABLE_WIDTH(64, sad, sad_avx2_, pixelcmp_t)
-    SET_FUNC_PRIMITIVE_TABLE_WIDTH(32, sad_x3, sad_avx2_x3_, pixelcmp_x3_t)
-    SET_FUNC_PRIMITIVE_TABLE_WIDTH(64, sad_x3, sad_avx2_x3_, pixelcmp_x3_t)
-    SET_FUNC_PRIMITIVE_TABLE_WIDTH(32, sad_x4, sad_avx2_x4_, pixelcmp_x4_t)
-    SET_FUNC_PRIMITIVE_TABLE_WIDTH(64, sad_x4, sad_avx2_x4_, pixelcmp_x4_t)
+    SET_SADS(32, 8);
+    SET_SADS(32, 16);
+    SET_SADS(32, 24);
+    SET_SADS(32, 32);
+    SET_SADS(32, 64);
+    SET_SADS(64, 16);
+    SET_SADS(64, 32);
+    SET_SADS(64, 48);
+    SET_SADS(64, 64);
 #endif
 #endif
 }
diff -r 49231db18e60 -r 8f7091d09c11 source/common/vec/pixel-sse3.cpp
--- a/source/common/vec/pixel-sse3.cpp	Sat Oct 05 16:25:21 2013 -0500
+++ b/source/common/vec/pixel-sse3.cpp	Sat Oct 05 19:20:28 2013 -0500
@@ -307,5 +307,27 @@ void blockfil_s_32(short *dst, intptr_t 
 
 #define INSTRSET 3
 #include "vectorclass.h"
-#define ARCH sse3
 #include "pixel.inc"
+
+namespace x265 {
+void Setup_Vec_PixelPrimitives_sse3(EncoderPrimitives &p)
+{
+    p.cvt16to32     = convert16to32;
+    p.cvt32to16     = convert32to16;
+    p.cvt32to16_shr = convert32to16_shr;
+
+    p.cvt16to32_shl = convert16to32_shl;
+    p.cvt16to16_shl = convert16to16_shl;
+
+#if !HIGH_BIT_DEPTH
+    p.transpose[0] = transpose4;
+    p.transpose[1] = transpose8;
+    p.transpose[2] = transpose16;
+    p.transpose[3] = transpose32;
+    p.blockfil_s[BLOCK_4x4]   = blockfil_s_4;
+    p.blockfil_s[BLOCK_8x8]   = blockfil_s_8;
+    p.blockfil_s[BLOCK_16x16] = blockfil_s_16;
+    p.blockfil_s[BLOCK_32x32] = blockfil_s_32;
+#endif /* if HIGH_BIT_DEPTH */
+}
+}
diff -r 49231db18e60 -r 8f7091d09c11 source/common/vec/pixel-sse41.cpp
--- a/source/common/vec/pixel-sse41.cpp	Sat Oct 05 16:25:21 2013 -0500
+++ b/source/common/vec/pixel-sse41.cpp	Sat Oct 05 19:20:28 2013 -0500
@@ -46,404 +46,6 @@ namespace {
 #if !HIGH_BIT_DEPTH
 #if HAVE_MMX
 template<int ly>
-int sad_4(pixel * fenc, intptr_t fencstride, pixel * fref, intptr_t frefstride)
-{
-    assert((ly % 4) == 0);
-
-    __m64 sum0 = _mm_setzero_si64();
-
-    __m64 T00, T01, T02, T03;
-    __m64 T10, T11, T12, T13;
-    __m64 T20, T21, T22, T23;
-
-    if ((ly % 16) == 0)
-    {
-        for (int i = 0; i < ly; i += 16)
-        {
-            T00 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 0) * fencstride));
-            T01 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 1) * fencstride));
-            T02 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 2) * fencstride));
-            T03 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 3) * fencstride));
-
-            T10 = _mm_cvtsi32_si64(*(int*)(fref + (i + 0) * frefstride));
-            T11 = _mm_cvtsi32_si64(*(int*)(fref + (i + 1) * frefstride));
-            T12 = _mm_cvtsi32_si64(*(int*)(fref + (i + 2) * frefstride));
-            T13 = _mm_cvtsi32_si64(*(int*)(fref + (i + 3) * frefstride));
-
-            T20 = _mm_sad_pu8(T00, T10);
-            T21 = _mm_sad_pu8(T01, T11);
-            T22 = _mm_sad_pu8(T02, T12);
-            T23 = _mm_sad_pu8(T03, T13);
-
-            sum0 = _mm_add_pi16(sum0, T20);
-            sum0 = _mm_add_pi16(sum0, T21);
-            sum0 = _mm_add_pi16(sum0, T22);
-            sum0 = _mm_add_pi16(sum0, T23);
-
-            T00 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 4) * fencstride));
-            T01 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 5) * fencstride));
-            T02 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 6) * fencstride));
-            T03 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 7) * fencstride));
-
-            T10 = _mm_cvtsi32_si64(*(int*)(fref + (i + 4) * frefstride));
-            T11 = _mm_cvtsi32_si64(*(int*)(fref + (i + 5) * frefstride));
-            T12 = _mm_cvtsi32_si64(*(int*)(fref + (i + 6) * frefstride));
-            T13 = _mm_cvtsi32_si64(*(int*)(fref + (i + 7) * frefstride));
-
-            T20 = _mm_sad_pu8(T00, T10);
-            T21 = _mm_sad_pu8(T01, T11);
-            T22 = _mm_sad_pu8(T02, T12);
-            T23 = _mm_sad_pu8(T03, T13);
-
-            sum0 = _mm_add_pi16(sum0, T20);
-            sum0 = _mm_add_pi16(sum0, T21);
-            sum0 = _mm_add_pi16(sum0, T22);
-            sum0 = _mm_add_pi16(sum0, T23);
-
-            T00 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 8) * fencstride));
-            T01 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 9) * fencstride));
-            T02 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 10) * fencstride));
-            T03 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 11) * fencstride));
-
-            T10 = _mm_cvtsi32_si64(*(int*)(fref + (i + 8) * frefstride));
-            T11 = _mm_cvtsi32_si64(*(int*)(fref + (i + 9) * frefstride));
-            T12 = _mm_cvtsi32_si64(*(int*)(fref + (i + 10) * frefstride));
-            T13 = _mm_cvtsi32_si64(*(int*)(fref + (i + 11) * frefstride));
-
-            T20 = _mm_sad_pu8(T00, T10);
-            T21 = _mm_sad_pu8(T01, T11);
-            T22 = _mm_sad_pu8(T02, T12);
-            T23 = _mm_sad_pu8(T03, T13);
-
-            sum0 = _mm_add_pi16(sum0, T20);
-            sum0 = _mm_add_pi16(sum0, T21);
-            sum0 = _mm_add_pi16(sum0, T22);
-            sum0 = _mm_add_pi16(sum0, T23);
-
-            T00 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 12) * fencstride));
-            T01 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 13) * fencstride));
-            T02 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 14) * fencstride));
-            T03 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 15) * fencstride));
-
-            T10 = _mm_cvtsi32_si64(*(int*)(fref + (i + 12) * frefstride));
-            T11 = _mm_cvtsi32_si64(*(int*)(fref + (i + 13) * frefstride));
-            T12 = _mm_cvtsi32_si64(*(int*)(fref + (i + 14) * frefstride));
-            T13 = _mm_cvtsi32_si64(*(int*)(fref + (i + 15) * frefstride));
-
-            T20 = _mm_sad_pu8(T00, T10);
-            T21 = _mm_sad_pu8(T01, T11);
-            T22 = _mm_sad_pu8(T02, T12);
-            T23 = _mm_sad_pu8(T03, T13);
-
-            sum0 = _mm_add_pi16(sum0, T20);
-            sum0 = _mm_add_pi16(sum0, T21);
-            sum0 = _mm_add_pi16(sum0, T22);
-            sum0 = _mm_add_pi16(sum0, T23);
-        }
-    }
-    else if ((ly % 8) == 0)
-    {
-        for (int i = 0; i < ly; i += 8)
-        {
-            T00 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 0) * fencstride));
-            T01 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 1) * fencstride));
-            T02 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 2) * fencstride));
-            T03 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 3) * fencstride));
-
-            T10 = _mm_cvtsi32_si64(*(int*)(fref + (i + 0) * frefstride));
-            T11 = _mm_cvtsi32_si64(*(int*)(fref + (i + 1) * frefstride));
-            T12 = _mm_cvtsi32_si64(*(int*)(fref + (i + 2) * frefstride));
-            T13 = _mm_cvtsi32_si64(*(int*)(fref + (i + 3) * frefstride));
-
-            T20 = _mm_sad_pu8(T00, T10);
-            T21 = _mm_sad_pu8(T01, T11);
-            T22 = _mm_sad_pu8(T02, T12);
-            T23 = _mm_sad_pu8(T03, T13);
-
-            sum0 = _mm_add_pi16(sum0, T20);
-            sum0 = _mm_add_pi16(sum0, T21);
-            sum0 = _mm_add_pi16(sum0, T22);
-            sum0 = _mm_add_pi16(sum0, T23);
-
-            T00 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 4) * fencstride));
-            T01 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 5) * fencstride));
-            T02 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 6) * fencstride));
-            T03 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 7) * fencstride));
-
-            T10 = _mm_cvtsi32_si64(*(int*)(fref + (i + 4) * frefstride));
-            T11 = _mm_cvtsi32_si64(*(int*)(fref + (i + 5) * frefstride));
-            T12 = _mm_cvtsi32_si64(*(int*)(fref + (i + 6) * frefstride));
-            T13 = _mm_cvtsi32_si64(*(int*)(fref + (i + 7) * frefstride));
-
-            T20 = _mm_sad_pu8(T00, T10);
-            T21 = _mm_sad_pu8(T01, T11);
-            T22 = _mm_sad_pu8(T02, T12);
-            T23 = _mm_sad_pu8(T03, T13);
-
-            sum0 = _mm_add_pi16(sum0, T20);
-            sum0 = _mm_add_pi16(sum0, T21);
-            sum0 = _mm_add_pi16(sum0, T22);
-            sum0 = _mm_add_pi16(sum0, T23);
-        }
-    }
-    else
-    {
-        for (int i = 0; i < ly; i += 4)
-        {
-            T00 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 0) * fencstride));
-            T01 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 1) * fencstride));
-            T02 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 2) * fencstride));
-            T03 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 3) * fencstride));
-
-            T10 = _mm_cvtsi32_si64(*(int*)(fref + (i + 0) * frefstride));
-            T11 = _mm_cvtsi32_si64(*(int*)(fref + (i + 1) * frefstride));
-            T12 = _mm_cvtsi32_si64(*(int*)(fref + (i + 2) * frefstride));
-            T13 = _mm_cvtsi32_si64(*(int*)(fref + (i + 3) * frefstride));
-
-            T20 = _mm_sad_pu8(T00, T10);
-            T21 = _mm_sad_pu8(T01, T11);
-            T22 = _mm_sad_pu8(T02, T12);
-            T23 = _mm_sad_pu8(T03, T13);
-
-            sum0 = _mm_add_pi16(sum0, T20);
-            sum0 = _mm_add_pi16(sum0, T21);
-            sum0 = _mm_add_pi16(sum0, T22);
-            sum0 = _mm_add_pi16(sum0, T23);
-        }
-    }
-    // 8 * 255 -> 11 bits x 8 -> 14 bits
-    int sum = _m_to_int(sum0);
-    return sum;
-}
-
-#else /* if HAVE_MMX */
-
-template<int ly>
-int sad_4(pixel * fenc, intptr_t fencstride, pixel * fref, intptr_t frefstride)
-{
-    assert((ly % 4) == 0);
-    __m128i sum0 = _mm_setzero_si128();
-    __m128i sum1 = _mm_setzero_si128();
-    __m128i T00, T01, T02, T03;
-    __m128i T10, T11, T12, T13;
-    __m128i T20 = _mm_setzero_si128();
-
-    if (ly == 4)
-    {
-        T00 = _mm_loadl_epi64((__m128i*)(fenc + (0) * fencstride));
-        T01 = _mm_loadl_epi64((__m128i*)(fenc + (1) * fencstride));
-        T01 = _mm_unpacklo_epi32(T00, T01);
-        T02 = _mm_loadl_epi64((__m128i*)(fenc + (2) * fencstride));
-        T03 = _mm_loadl_epi64((__m128i*)(fenc + (3) * fencstride));
-        T03 = _mm_unpacklo_epi32(T02, T03);
-        T03 = _mm_unpacklo_epi64(T01, T03);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref + (0) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref + (1) * frefstride));
-        T11 = _mm_unpacklo_epi32(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref + (2) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref + (3) * frefstride));
-        T13 = _mm_unpacklo_epi32(T12, T13);
-        T13 = _mm_unpacklo_epi64(T11, T13);
-
-        T20 = _mm_sad_epu8(T03, T13);
-        sum0 = _mm_add_epi32(sum0, T20);
-    }
-    else if (ly == 8)
-    {
-        T00 = _mm_loadl_epi64((__m128i*)(fenc + (0) * fencstride));
-        T01 = _mm_loadl_epi64((__m128i*)(fenc + (1) * fencstride));

