[x265-commits] [x265] pixel: limit sad_8 routines to just height 32

Steve Borho steve at borho.org
Sun Oct 6 02:44:18 CEST 2013


details:   http://hg.videolan.org/x265/rev/19b319c9a6aa
branches:  
changeset: 4227:19b319c9a6aa
user:      Steve Borho <steve at borho.org>
date:      Sat Oct 05 19:43:50 2013 -0500
description:
pixel: limit sad_8 routines to just height 32

8x4, 8x8, and 8x16 are handled by x264 assembly; only 8x32 remains for the
intrinsic function to cover
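
For context, a minimal, self-contained sketch of the resulting 8-wide SAD dispatch
follows. All names in it (sad_8_c, sadAsm_8x4, sad8xN) are purely illustrative
stand-ins, not x265's actual primitive table or assembly symbols; the point is only
that the 8x4/8x8/8x16 slots are filled by assembly kernels, leaving 8x32 as the
single height the intrinsic template still has to handle.

#include <stdint.h>
#include <stdlib.h>

typedef uint8_t pixel;
typedef int (*sad_t)(pixel *fenc, intptr_t fencstride, pixel *fref, intptr_t frefstride);

// Portable reference SAD for an 8-pixel-wide block of height 'ly'.
template<int ly>
static int sad_8_c(pixel *fenc, intptr_t fencstride, pixel *fref, intptr_t frefstride)
{
    int sum = 0;
    for (int y = 0; y < ly; y++)
        for (int x = 0; x < 8; x++)
            sum += abs(fenc[y * fencstride + x] - fref[y * frefstride + x]);
    return sum;
}

// Stand-ins for the x264-derived assembly kernels that cover the smaller heights.
static int sadAsm_8x4(pixel *e, intptr_t es, pixel *r, intptr_t rs)  { return sad_8_c<4>(e, es, r, rs); }
static int sadAsm_8x8(pixel *e, intptr_t es, pixel *r, intptr_t rs)  { return sad_8_c<8>(e, es, r, rs); }
static int sadAsm_8x16(pixel *e, intptr_t es, pixel *r, intptr_t rs) { return sad_8_c<16>(e, es, r, rs); }

// Only the 8x32 slot still needs the C++/intrinsic template instantiation.
static const sad_t sad8xN[4] =
{
    sadAsm_8x4,    // 8x4  -> assembly
    sadAsm_8x8,    // 8x8  -> assembly
    sadAsm_8x16,   // 8x16 -> assembly
    sad_8_c<32>,   // 8x32 -> the remaining intrinsic path (sad_8<32> in the patch below)
};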

diffstat:

 source/common/vec/pixel-sse41.cpp |  3073 +++++-------------------------------
 1 files changed, 445 insertions(+), 2628 deletions(-)

diffs (truncated from 3293 to 300 lines):

diff -r 8f7091d09c11 -r 19b319c9a6aa source/common/vec/pixel-sse41.cpp
--- a/source/common/vec/pixel-sse41.cpp	Sat Oct 05 19:20:28 2013 -0500
+++ b/source/common/vec/pixel-sse41.cpp	Sat Oct 05 19:43:50 2013 -0500
@@ -45,395 +45,153 @@ using namespace x265;
 namespace {
 #if !HIGH_BIT_DEPTH
 #if HAVE_MMX
-template<int ly>
+template<int ly>  // ly will always be 32
 int sad_8(pixel * fenc, intptr_t fencstride, pixel * fref, intptr_t frefstride)
 {
-    assert((ly % 4) == 0);
-
     __m64 sum0 = _mm_setzero_si64();
 
     __m64 T00, T01, T02, T03;
     __m64 T10, T11, T12, T13;
     __m64 T20, T21, T22, T23;
 
-    if ((ly % 16) == 0)
+    for (int i = 0; i < ly; i += 16)
     {
-        for (int i = 0; i < ly; i += 16)
-        {
-            T00 = (*(__m64*)(fenc + (i + 0) * fencstride));
-            T01 = (*(__m64*)(fenc + (i + 1) * fencstride));
-            T02 = (*(__m64*)(fenc + (i + 2) * fencstride));
-            T03 = (*(__m64*)(fenc + (i + 3) * fencstride));
-
-            T10 = (*(__m64*)(fref + (i + 0) * frefstride));
-            T11 = (*(__m64*)(fref + (i + 1) * frefstride));
-            T12 = (*(__m64*)(fref + (i + 2) * frefstride));
-            T13 = (*(__m64*)(fref + (i + 3) * frefstride));
-
-            T20 = _mm_sad_pu8(T00, T10);
-            T21 = _mm_sad_pu8(T01, T11);
-            T22 = _mm_sad_pu8(T02, T12);
-            T23 = _mm_sad_pu8(T03, T13);
-
-            sum0 = _mm_add_pi16(sum0, T20);
-            sum0 = _mm_add_pi16(sum0, T21);
-            sum0 = _mm_add_pi16(sum0, T22);
-            sum0 = _mm_add_pi16(sum0, T23);
-
-            T00 = (*(__m64*)(fenc + (i + 4) * fencstride));
-            T01 = (*(__m64*)(fenc + (i + 5) * fencstride));
-            T02 = (*(__m64*)(fenc + (i + 6) * fencstride));
-            T03 = (*(__m64*)(fenc + (i + 7) * fencstride));
-
-            T10 = (*(__m64*)(fref + (i + 4) * frefstride));
-            T11 = (*(__m64*)(fref + (i + 5) * frefstride));
-            T12 = (*(__m64*)(fref + (i + 6) * frefstride));
-            T13 = (*(__m64*)(fref + (i + 7) * frefstride));
-
-            T20 = _mm_sad_pu8(T00, T10);
-            T21 = _mm_sad_pu8(T01, T11);
-            T22 = _mm_sad_pu8(T02, T12);
-            T23 = _mm_sad_pu8(T03, T13);
-
-            sum0 = _mm_add_pi16(sum0, T20);
-            sum0 = _mm_add_pi16(sum0, T21);
-            sum0 = _mm_add_pi16(sum0, T22);
-            sum0 = _mm_add_pi16(sum0, T23);
-
-            T00 = (*(__m64*)(fenc + (i + 8) * fencstride));
-            T01 = (*(__m64*)(fenc + (i + 9) * fencstride));
-            T02 = (*(__m64*)(fenc + (i + 10) * fencstride));
-            T03 = (*(__m64*)(fenc + (i + 11) * fencstride));
-
-            T10 = (*(__m64*)(fref + (i + 8) * frefstride));
-            T11 = (*(__m64*)(fref + (i + 9) * frefstride));
-            T12 = (*(__m64*)(fref + (i + 10) * frefstride));
-            T13 = (*(__m64*)(fref + (i + 11) * frefstride));
-
-            T20 = _mm_sad_pu8(T00, T10);
-            T21 = _mm_sad_pu8(T01, T11);
-            T22 = _mm_sad_pu8(T02, T12);
-            T23 = _mm_sad_pu8(T03, T13);
-
-            sum0 = _mm_add_pi16(sum0, T20);
-            sum0 = _mm_add_pi16(sum0, T21);
-            sum0 = _mm_add_pi16(sum0, T22);
-            sum0 = _mm_add_pi16(sum0, T23);
-
-            T00 = (*(__m64*)(fenc + (i + 12) * fencstride));
-            T01 = (*(__m64*)(fenc + (i + 13) * fencstride));
-            T02 = (*(__m64*)(fenc + (i + 14) * fencstride));
-            T03 = (*(__m64*)(fenc + (i + 15) * fencstride));
-
-            T10 = (*(__m64*)(fref + (i + 12) * frefstride));
-            T11 = (*(__m64*)(fref + (i + 13) * frefstride));
-            T12 = (*(__m64*)(fref + (i + 14) * frefstride));
-            T13 = (*(__m64*)(fref + (i + 15) * frefstride));
-
-            T20 = _mm_sad_pu8(T00, T10);
-            T21 = _mm_sad_pu8(T01, T11);
-            T22 = _mm_sad_pu8(T02, T12);
-            T23 = _mm_sad_pu8(T03, T13);
-
-            sum0 = _mm_add_pi16(sum0, T20);
-            sum0 = _mm_add_pi16(sum0, T21);
-            sum0 = _mm_add_pi16(sum0, T22);
-            sum0 = _mm_add_pi16(sum0, T23);
-        }
-    }
-    else if ((ly % 8) == 0)
-    {
-        for (int i = 0; i < ly; i += 8)
-        {
-            T00 = (*(__m64*)(fenc + (i + 0) * fencstride));
-            T01 = (*(__m64*)(fenc + (i + 1) * fencstride));
-            T02 = (*(__m64*)(fenc + (i + 2) * fencstride));
-            T03 = (*(__m64*)(fenc + (i + 3) * fencstride));
-
-            T10 = (*(__m64*)(fref + (i + 0) * frefstride));
-            T11 = (*(__m64*)(fref + (i + 1) * frefstride));
-            T12 = (*(__m64*)(fref + (i + 2) * frefstride));
-            T13 = (*(__m64*)(fref + (i + 3) * frefstride));
-
-            T20 = _mm_sad_pu8(T00, T10);
-            T21 = _mm_sad_pu8(T01, T11);
-            T22 = _mm_sad_pu8(T02, T12);
-            T23 = _mm_sad_pu8(T03, T13);
-
-            sum0 = _mm_add_pi16(sum0, T20);
-            sum0 = _mm_add_pi16(sum0, T21);
-            sum0 = _mm_add_pi16(sum0, T22);
-            sum0 = _mm_add_pi16(sum0, T23);
-
-            T00 = (*(__m64*)(fenc + (i + 4) * fencstride));
-            T01 = (*(__m64*)(fenc + (i + 5) * fencstride));
-            T02 = (*(__m64*)(fenc + (i + 6) * fencstride));
-            T03 = (*(__m64*)(fenc + (i + 7) * fencstride));
-
-            T10 = (*(__m64*)(fref + (i + 4) * frefstride));
-            T11 = (*(__m64*)(fref + (i + 5) * frefstride));
-            T12 = (*(__m64*)(fref + (i + 6) * frefstride));
-            T13 = (*(__m64*)(fref + (i + 7) * frefstride));
-
-            T20 = _mm_sad_pu8(T00, T10);
-            T21 = _mm_sad_pu8(T01, T11);
-            T22 = _mm_sad_pu8(T02, T12);
-            T23 = _mm_sad_pu8(T03, T13);
-
-            sum0 = _mm_add_pi16(sum0, T20);
-            sum0 = _mm_add_pi16(sum0, T21);
-            sum0 = _mm_add_pi16(sum0, T22);
-            sum0 = _mm_add_pi16(sum0, T23);
-        }
-    }
-    else
-    {
-        for (int i = 0; i < ly; i += 4)
-        {
-            T00 = (*(__m64*)(fenc + (i + 0) * fencstride));
-            T01 = (*(__m64*)(fenc + (i + 1) * fencstride));
-            T02 = (*(__m64*)(fenc + (i + 2) * fencstride));
-            T03 = (*(__m64*)(fenc + (i + 3) * fencstride));
-
-            T10 = (*(__m64*)(fref + (i + 0) * frefstride));
-            T11 = (*(__m64*)(fref + (i + 1) * frefstride));
-            T12 = (*(__m64*)(fref + (i + 2) * frefstride));
-            T13 = (*(__m64*)(fref + (i + 3) * frefstride));
-
-            T20 = _mm_sad_pu8(T00, T10);
-            T21 = _mm_sad_pu8(T01, T11);
-            T22 = _mm_sad_pu8(T02, T12);
-            T23 = _mm_sad_pu8(T03, T13);
-
-            sum0 = _mm_add_pi16(sum0, T20);
-            sum0 = _mm_add_pi16(sum0, T21);
-            sum0 = _mm_add_pi16(sum0, T22);
-            sum0 = _mm_add_pi16(sum0, T23);
-        }
+        T00 = (*(__m64*)(fenc + (i + 0) * fencstride));
+        T01 = (*(__m64*)(fenc + (i + 1) * fencstride));
+        T02 = (*(__m64*)(fenc + (i + 2) * fencstride));
+        T03 = (*(__m64*)(fenc + (i + 3) * fencstride));
+
+        T10 = (*(__m64*)(fref + (i + 0) * frefstride));
+        T11 = (*(__m64*)(fref + (i + 1) * frefstride));
+        T12 = (*(__m64*)(fref + (i + 2) * frefstride));
+        T13 = (*(__m64*)(fref + (i + 3) * frefstride));
+
+        T20 = _mm_sad_pu8(T00, T10);
+        T21 = _mm_sad_pu8(T01, T11);
+        T22 = _mm_sad_pu8(T02, T12);
+        T23 = _mm_sad_pu8(T03, T13);
+
+        sum0 = _mm_add_pi16(sum0, T20);
+        sum0 = _mm_add_pi16(sum0, T21);
+        sum0 = _mm_add_pi16(sum0, T22);
+        sum0 = _mm_add_pi16(sum0, T23);
+
+        T00 = (*(__m64*)(fenc + (i + 4) * fencstride));
+        T01 = (*(__m64*)(fenc + (i + 5) * fencstride));
+        T02 = (*(__m64*)(fenc + (i + 6) * fencstride));
+        T03 = (*(__m64*)(fenc + (i + 7) * fencstride));
+
+        T10 = (*(__m64*)(fref + (i + 4) * frefstride));
+        T11 = (*(__m64*)(fref + (i + 5) * frefstride));
+        T12 = (*(__m64*)(fref + (i + 6) * frefstride));
+        T13 = (*(__m64*)(fref + (i + 7) * frefstride));
+
+        T20 = _mm_sad_pu8(T00, T10);
+        T21 = _mm_sad_pu8(T01, T11);
+        T22 = _mm_sad_pu8(T02, T12);
+        T23 = _mm_sad_pu8(T03, T13);
+
+        sum0 = _mm_add_pi16(sum0, T20);
+        sum0 = _mm_add_pi16(sum0, T21);
+        sum0 = _mm_add_pi16(sum0, T22);
+        sum0 = _mm_add_pi16(sum0, T23);
+
+        T00 = (*(__m64*)(fenc + (i + 8) * fencstride));
+        T01 = (*(__m64*)(fenc + (i + 9) * fencstride));
+        T02 = (*(__m64*)(fenc + (i + 10) * fencstride));
+        T03 = (*(__m64*)(fenc + (i + 11) * fencstride));
+
+        T10 = (*(__m64*)(fref + (i + 8) * frefstride));
+        T11 = (*(__m64*)(fref + (i + 9) * frefstride));
+        T12 = (*(__m64*)(fref + (i + 10) * frefstride));
+        T13 = (*(__m64*)(fref + (i + 11) * frefstride));
+
+        T20 = _mm_sad_pu8(T00, T10);
+        T21 = _mm_sad_pu8(T01, T11);
+        T22 = _mm_sad_pu8(T02, T12);
+        T23 = _mm_sad_pu8(T03, T13);
+
+        sum0 = _mm_add_pi16(sum0, T20);
+        sum0 = _mm_add_pi16(sum0, T21);
+        sum0 = _mm_add_pi16(sum0, T22);
+        sum0 = _mm_add_pi16(sum0, T23);
+
+        T00 = (*(__m64*)(fenc + (i + 12) * fencstride));
+        T01 = (*(__m64*)(fenc + (i + 13) * fencstride));
+        T02 = (*(__m64*)(fenc + (i + 14) * fencstride));
+        T03 = (*(__m64*)(fenc + (i + 15) * fencstride));
+
+        T10 = (*(__m64*)(fref + (i + 12) * frefstride));
+        T11 = (*(__m64*)(fref + (i + 13) * frefstride));
+        T12 = (*(__m64*)(fref + (i + 14) * frefstride));
+        T13 = (*(__m64*)(fref + (i + 15) * frefstride));
+
+        T20 = _mm_sad_pu8(T00, T10);
+        T21 = _mm_sad_pu8(T01, T11);
+        T22 = _mm_sad_pu8(T02, T12);
+        T23 = _mm_sad_pu8(T03, T13);
+
+        sum0 = _mm_add_pi16(sum0, T20);
+        sum0 = _mm_add_pi16(sum0, T21);
+        sum0 = _mm_add_pi16(sum0, T22);
+        sum0 = _mm_add_pi16(sum0, T23);
     }
     // 8 * 255 -> 11 bits x 8 -> 14 bits
-    int sum = _m_to_int(sum0);
-    return sum;
+    return _m_to_int(sum0);
 }
 
 #else /* if HAVE_MMX */
 
-template<int ly>
+template<int ly>  // ly will always be 32
 int sad_8(pixel * fenc, intptr_t fencstride, pixel * fref, intptr_t frefstride)
 {
-    assert((ly % 4) == 0);
     __m128i sum0 = _mm_setzero_si128();
     __m128i sum1 = _mm_setzero_si128();
     __m128i T00, T01, T02, T03;
     __m128i T10, T11, T12, T13;
     __m128i T20, T21;
 
-    if (ly == 4)
+    for (int i = 0; i < ly; i += 8)
     {
-        T00 = _mm_loadl_epi64((__m128i*)(fenc + (0) * fencstride));
-        T01 = _mm_loadl_epi64((__m128i*)(fenc + (1) * fencstride));
+        T00 = _mm_loadl_epi64((__m128i*)(fenc + (i + 0) * fencstride));
+        T01 = _mm_loadl_epi64((__m128i*)(fenc + (i + 1) * fencstride));
         T01 = _mm_unpacklo_epi64(T00, T01);
-        T02 = _mm_loadl_epi64((__m128i*)(fenc + (2) * fencstride));
-        T03 = _mm_loadl_epi64((__m128i*)(fenc + (3) * fencstride));
+        T02 = _mm_loadl_epi64((__m128i*)(fenc + (i + 2) * fencstride));
+        T03 = _mm_loadl_epi64((__m128i*)(fenc + (i + 3) * fencstride));
         T03 = _mm_unpacklo_epi64(T02, T03);
 
-        T10 = _mm_loadl_epi64((__m128i*)(fref + (0) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref + (1) * frefstride));
+        T10 = _mm_loadl_epi64((__m128i*)(fref + (i + 0) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref + (i + 1) * frefstride));
         T11 = _mm_unpacklo_epi64(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref + (2) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref + (3) * frefstride));
+        T12 = _mm_loadl_epi64((__m128i*)(fref + (i + 2) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref + (i + 3) * frefstride));
         T13 = _mm_unpacklo_epi64(T12, T13);
         T20 = _mm_sad_epu8(T01, T11);
         T21 = _mm_sad_epu8(T03, T13);
 

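The diff above is truncated before the rewritten SSE path of sad_8<32> is fully
visible. As a rough, self-contained illustration of the technique it uses (two
8-byte rows packed into one register so each _mm_sad_epu8 covers two rows, with
the height fixed at 32), the following SSE2 sketch mirrors that structure but is
not the changeset's exact code:

#include <emmintrin.h>  // SSE2
#include <stdint.h>

typedef uint8_t pixel;

// Sketch of an 8x32 SAD: each iteration handles two rows per buffer.
static int sad_8x32_sse2(pixel *fenc, intptr_t fencstride, pixel *fref, intptr_t frefstride)
{
    __m128i sum = _mm_setzero_si128();

    for (int i = 0; i < 32; i += 2)
    {
        // Pack two 8-pixel rows of each buffer into one 128-bit register.
        __m128i enc = _mm_unpacklo_epi64(
            _mm_loadl_epi64((__m128i*)(fenc + (i + 0) * fencstride)),
            _mm_loadl_epi64((__m128i*)(fenc + (i + 1) * fencstride)));
        __m128i ref = _mm_unpacklo_epi64(
            _mm_loadl_epi64((__m128i*)(fref + (i + 0) * frefstride)),
            _mm_loadl_epi64((__m128i*)(fref + (i + 1) * frefstride)));

        // _mm_sad_epu8 yields one partial sum per 8-byte half of the register.
        sum = _mm_add_epi32(sum, _mm_sad_epu8(enc, ref));
    }

    // Fold the partial sums from the two 64-bit halves into one total.
    sum = _mm_add_epi32(sum, _mm_shuffle_epi32(sum, _MM_SHUFFLE(1, 0, 3, 2)));
    return _mm_cvtsi128_si32(sum);
}

Since a 32-row, 8-pixel-wide block contributes at most 32 * 8 * 255 = 65280, the
running total still fits in 16 bits, which is why the MMX path in the diff can
keep accumulating the _mm_sad_pu8 results with 16-bit lane adds (_mm_add_pi16).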
