[x265] [PATCH] pixel8.inc: optimize sad_x3 with intrinsics

praveen at multicorewareinc.com
Mon Sep 2 08:31:38 CEST 2013


# HG changeset patch
# User praveentiwari
# Date 1378103284 -19800
# Node ID 1a632b2e0fc7a3256202d3afd7dc187042081607
# Parent  5c27dd5f854217e6229a3f70dd2b9b7fb5b9d7bd
pixel8.inc: optimize sad_x3 with intrinsics
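
This patch adds intrinsic fast paths for the sad_x3 primitives in
pixel8.inc: MMX (__m64) versions of sad_x3_4 and sad_x3_8 under
HAVE_MMX, and an SSE2 (__m128i) sad_x3_4 for the #else branch. Each
primitive computes the SAD of one encoder block against three reference
blocks in a single call, with fully unrolled cases for ly = 4, 8 and 16
and generic loops for the remaining multiples of 8 and 4. For
orientation, a scalar sketch of the behaviour these paths implement is
shown below (illustrative only, not part of the patch; it assumes
typedef uint8_t pixel as in 8-bit x265 builds, FENC_STRIDE as defined
by the encoder, and <cstdlib> for abs(); lx stands for the fixed block
width encoded in the function names, 4 or 8):

    template<int lx, int ly>
    void sad_x3_ref(pixel *fenc, pixel *fref1, pixel *fref2, pixel *fref3,
                    intptr_t frefstride, int *res)
    {
        // res[k] receives the SAD of the encoder block against fref(k+1)
        res[0] = res[1] = res[2] = 0;
        for (int y = 0; y < ly; y++)
        {
            for (int x = 0; x < lx; x++)
            {
                int e = fenc[y * FENC_STRIDE + x];
                res[0] += abs(e - fref1[y * frefstride + x]);
                res[1] += abs(e - fref2[y * frefstride + x]);
                res[2] += abs(e - fref3[y * frefstride + x]);
            }
        }
    }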

diff -r 5c27dd5f8542 -r 1a632b2e0fc7 source/common/vec/pixel8.inc
--- a/source/common/vec/pixel8.inc	Sun Sep 01 06:46:43 2013 -0500
+++ b/source/common/vec/pixel8.inc	Mon Sep 02 11:58:04 2013 +0530
@@ -1332,6 +1332,996 @@
 {
     assert((ly % 4) == 0);
 
+    if (ly == 4)
+    {
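+        // 4x4 block: byte-interleave rows 0|1 and 2|3 into single __m64
+        // registers (same interleave on each reference) so every
+        // _mm_sad_pu8 covers two rows at once.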
+        __m64 sum0, sum1, sum2;
+        __m64 T10, T11, T12, T13;
+        __m64 T00, T01, T02, T03;
+        __m64 T20, T21;
+
+        T00 = _mm_cvtsi32_si64(*(int*)(fenc));
+        T01 = _mm_cvtsi32_si64(*(int*)(fenc + FENC_STRIDE));
+        T00 = _mm_unpacklo_pi8(T00, T01);
+        T02 = _mm_cvtsi32_si64(*(int*)(fenc + (FENC_STRIDE << 1)));
+        T03 = _mm_cvtsi32_si64(*(int*)(fenc + 3 * FENC_STRIDE));
+        T02 = _mm_unpacklo_pi8(T02, T03);
+
+        T10 = _mm_cvtsi32_si64(*(int*)(fref1));
+        T11 = _mm_cvtsi32_si64(*(int*)(fref1 + frefstride));
+        T10 = _mm_unpacklo_pi8(T10, T11);
+        T12 = _mm_cvtsi32_si64(*(int*)(fref1 + 2 * frefstride));
+        T13 = _mm_cvtsi32_si64(*(int*)(fref1 + 3 * frefstride));
+        T12 = _mm_unpacklo_pi8(T12, T13);
+
+        T20 = _mm_sad_pu8(T00, T10);
+        T21 = _mm_sad_pu8(T02, T12);
+
+        sum0 = _mm_add_pi16(T20, T21);
+
+        T10 = _mm_cvtsi32_si64(*(int*)(fref2));
+        T11 = _mm_cvtsi32_si64(*(int*)(fref2 + frefstride));
+        T10 = _mm_unpacklo_pi8(T10, T11);
+        T12 = _mm_cvtsi32_si64(*(int*)(fref2 + 2 * frefstride));
+        T13 = _mm_cvtsi32_si64(*(int*)(fref2 + 3 * frefstride));
+        T12 = _mm_unpacklo_pi8(T12, T13);
+
+        T20 = _mm_sad_pu8(T00, T10);
+        T21 = _mm_sad_pu8(T02, T12);
+
+        sum1 = _mm_add_pi16(T20, T21);
+
+        T10 = _mm_cvtsi32_si64(*(int*)(fref3));
+        T11 = _mm_cvtsi32_si64(*(int*)(fref3 + frefstride));
+        T10 = _mm_unpacklo_pi8(T10, T11);
+        T12 = _mm_cvtsi32_si64(*(int*)(fref3 + 2 * frefstride));
+        T13 = _mm_cvtsi32_si64(*(int*)(fref3 + 3 * frefstride));
+        T12 = _mm_unpacklo_pi8(T12, T13);
+
+        T20 = _mm_sad_pu8(T00, T10);
+        T21 = _mm_sad_pu8(T02, T12);
+
+        sum2 = _mm_add_pi16(T20, T21);
+
+        res[0] = _m_to_int(sum0);
+        res[1] = _m_to_int(sum1);
+        res[2] = _m_to_int(sum2);
+    }
+    else if (ly == 8)
+    {
+        __m128i sum0, sum1, sum2;
+
+        __m128i T00, T01, T02, T03;
+        __m128i T10, T11, T12, T13;
+        __m128i R00, R01, R02, R03;
+        __m128i T20;
+
+        T00 = _mm_loadl_epi64((__m128i*)(fenc));
+        T01 = _mm_loadl_epi64((__m128i*)(fenc + FENC_STRIDE));
+        T01 = _mm_unpacklo_epi32(T00, T01);
+        T02 = _mm_loadl_epi64((__m128i*)(fenc + (2) * FENC_STRIDE));
+        T03 = _mm_loadl_epi64((__m128i*)(fenc + (3) * FENC_STRIDE));
+        T03 = _mm_unpacklo_epi32(T02, T03);
+        R00 = _mm_unpacklo_epi64(T01, T03);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref1));
+        T11 = _mm_loadl_epi64((__m128i*)(fref1 + frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref1 + 2 * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref1 + 3 * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R01 = _mm_unpacklo_epi64(T11, T13);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref2));
+        T11 = _mm_loadl_epi64((__m128i*)(fref2 + frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref2 + 2 * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref2 + 3 * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R02 = _mm_unpacklo_epi64(T11, T13);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref3));
+        T11 = _mm_loadl_epi64((__m128i*)(fref3 + frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref3 + 2 * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref3 + 3 * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R03 = _mm_unpacklo_epi64(T11, T13);
+
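+        // _mm_sad_epu8 leaves a 16-bit partial sum in each 64-bit half;
+        // shuffling dword 2 into lane 0 and adding folds both halves
+        // into the low lane.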
+        T20 = _mm_sad_epu8(R00, R01);
+        sum0 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+
+        T20 = _mm_sad_epu8(R00, R02);
+        sum1 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+
+        T20 = _mm_sad_epu8(R00, R03);
+        sum2 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+
+        T00 = _mm_loadl_epi64((__m128i*)(fenc + (4) * FENC_STRIDE));
+        T01 = _mm_loadl_epi64((__m128i*)(fenc + (5) * FENC_STRIDE));
+        T01 = _mm_unpacklo_epi32(T00, T01);
+        T02 = _mm_loadl_epi64((__m128i*)(fenc + (6) * FENC_STRIDE));
+        T03 = _mm_loadl_epi64((__m128i*)(fenc + (7) * FENC_STRIDE));
+        T03 = _mm_unpacklo_epi32(T02, T03);
+        R00 = _mm_unpacklo_epi64(T01, T03);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref1 + 4 * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref1 + 5 * frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref1 + 6 * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref1 + 7 * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R01 = _mm_unpacklo_epi64(T11, T13);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref2 + 4 * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref2 + 5 * frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref2 + 6 * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref2 + 7 * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R02 = _mm_unpacklo_epi64(T11, T13);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref3 + 4 * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref3 + 5 * frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref3 + 6 * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref3 + 7 * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R03 = _mm_unpacklo_epi64(T11, T13);
+
+        T20 = _mm_sad_epu8(R00, R01);
+        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+        sum0 = _mm_add_epi32(sum0, T20);
+
+        T20 = _mm_sad_epu8(R00, R02);
+        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+        sum1 = _mm_add_epi32(sum1, T20);
+
+        T20 = _mm_sad_epu8(R00, R03);
+        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+        sum2 = _mm_add_epi32(sum2, T20);
+
+        res[0] = _mm_cvtsi128_si32(sum0);
+        res[1] = _mm_cvtsi128_si32(sum1);
+        res[2] = _mm_cvtsi128_si32(sum2);
+    }
+    else if (ly == 16)
+    {
+        __m128i sum0, sum1, sum2;
+
+        __m128i T00, T01, T02, T03;
+        __m128i T10, T11, T12, T13;
+        __m128i R00, R01, R02, R03;
+        __m128i T20;
+
+        T00 = _mm_loadl_epi64((__m128i*)(fenc));
+        T01 = _mm_loadl_epi64((__m128i*)(fenc + FENC_STRIDE));
+        T01 = _mm_unpacklo_epi32(T00, T01);
+        T02 = _mm_loadl_epi64((__m128i*)(fenc + (2) * FENC_STRIDE));
+        T03 = _mm_loadl_epi64((__m128i*)(fenc + (3) * FENC_STRIDE));
+        T03 = _mm_unpacklo_epi32(T02, T03);
+        R00 = _mm_unpacklo_epi64(T01, T03);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref1));
+        T11 = _mm_loadl_epi64((__m128i*)(fref1 + frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref1 + (2) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref1 + (3) * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R01 = _mm_unpacklo_epi64(T11, T13);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref2));
+        T11 = _mm_loadl_epi64((__m128i*)(fref2 + frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref2 + (2) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref2 + (3) * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R02 = _mm_unpacklo_epi64(T11, T13);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref3));
+        T11 = _mm_loadl_epi64((__m128i*)(fref3 + frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref3 + (2) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref3 + (3) * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R03 = _mm_unpacklo_epi64(T11, T13);
+
+        T20 = _mm_sad_epu8(R00, R01);
+        sum0 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+
+        T20 = _mm_sad_epu8(R00, R02);
+        sum1 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+
+        T20 = _mm_sad_epu8(R00, R03);
+        sum2 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+
+        T00 = _mm_loadl_epi64((__m128i*)(fenc + (4) * FENC_STRIDE));
+        T01 = _mm_loadl_epi64((__m128i*)(fenc + (5) * FENC_STRIDE));
+        T01 = _mm_unpacklo_epi32(T00, T01);
+        T02 = _mm_loadl_epi64((__m128i*)(fenc + (6) * FENC_STRIDE));
+        T03 = _mm_loadl_epi64((__m128i*)(fenc + (7) * FENC_STRIDE));
+        T03 = _mm_unpacklo_epi32(T02, T03);
+        R00 = _mm_unpacklo_epi64(T01, T03);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref1 + (4) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref1 + (5) * frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref1 + (6) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref1 + (7) * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R01 = _mm_unpacklo_epi64(T11, T13);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref2 + (4) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref2 + (5) * frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref2 + (6) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref2 + (7) * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R02 = _mm_unpacklo_epi64(T11, T13);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref3 + (4) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref3 + (5) * frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref3 + (6) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref3 + (7) * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R03 = _mm_unpacklo_epi64(T11, T13);
+
+        T20 = _mm_sad_epu8(R00, R01);
+        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+        sum0 = _mm_add_epi32(sum0, T20);
+
+        T20 = _mm_sad_epu8(R00, R02);
+        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+        sum1 = _mm_add_epi32(sum1, T20);
+
+        T20 = _mm_sad_epu8(R00, R03);
+        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+        sum2 = _mm_add_epi32(sum2, T20);
+
+        T00 = _mm_loadl_epi64((__m128i*)(fenc + (8) * FENC_STRIDE));
+        T01 = _mm_loadl_epi64((__m128i*)(fenc + (9) * FENC_STRIDE));
+        T01 = _mm_unpacklo_epi32(T00, T01);
+        T02 = _mm_loadl_epi64((__m128i*)(fenc + (10) * FENC_STRIDE));
+        T03 = _mm_loadl_epi64((__m128i*)(fenc + (11) * FENC_STRIDE));
+        T03 = _mm_unpacklo_epi32(T02, T03);
+        R00 = _mm_unpacklo_epi64(T01, T03);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref1 + (8) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref1 + (9) * frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref1 + (10) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref1 + (11) * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R01 = _mm_unpacklo_epi64(T11, T13);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref2 + (8) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref2 + (9) * frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref2 + (10) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref2 + (11) * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R02 = _mm_unpacklo_epi64(T11, T13);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref3 + (8) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref3 + (9) * frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref3 + (10) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref3 + (11) * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R03 = _mm_unpacklo_epi64(T11, T13);
+
+        T20 = _mm_sad_epu8(R00, R01);
+        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+        sum0 = _mm_add_epi32(sum0, T20);
+
+        T20 = _mm_sad_epu8(R00, R02);
+        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+        sum1 = _mm_add_epi32(sum1, T20);
+
+        T20 = _mm_sad_epu8(R00, R03);
+        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+        sum2 = _mm_add_epi32(sum2, T20);
+
+        T00 = _mm_loadl_epi64((__m128i*)(fenc + (12) * FENC_STRIDE));
+        T01 = _mm_loadl_epi64((__m128i*)(fenc + (13) * FENC_STRIDE));
+        T01 = _mm_unpacklo_epi32(T00, T01);
+        T02 = _mm_loadl_epi64((__m128i*)(fenc + (14) * FENC_STRIDE));
+        T03 = _mm_loadl_epi64((__m128i*)(fenc + (15) * FENC_STRIDE));
+        T03 = _mm_unpacklo_epi32(T02, T03);
+        R00 = _mm_unpacklo_epi64(T01, T03);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref1 + (12) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref1 + (13) * frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref1 + (14) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref1 + (15) * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R01 = _mm_unpacklo_epi64(T11, T13);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref2 + (12) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref2 + (13) * frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref2 + (14) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref2 + (15) * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R02 = _mm_unpacklo_epi64(T11, T13);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref3 + (12) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref3 + (13) * frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref3 + (14) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref3 + (15) * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R03 = _mm_unpacklo_epi64(T11, T13);
+
+        T20 = _mm_sad_epu8(R00, R01);
+        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+        sum0 = _mm_add_epi32(sum0, T20);
+
+        T20 = _mm_sad_epu8(R00, R02);
+        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+        sum1 = _mm_add_epi32(sum1, T20);
+
+        T20 = _mm_sad_epu8(R00, R03);
+        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+        sum2 = _mm_add_epi32(sum2, T20);
+
+        res[0] = _mm_cvtsi128_si32(sum0);
+        res[1] = _mm_cvtsi128_si32(sum1);
+        res[2] = _mm_cvtsi128_si32(sum2);
+    }
+    else if ((ly % 8) == 0)
+    {
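+        // Generic case for heights that are multiples of 8: eight rows
+        // per iteration, accumulating the folded SADs in 32-bit lanes.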
+        __m128i sum0 = _mm_setzero_si128();
+        __m128i sum1 = _mm_setzero_si128();
+        __m128i sum2 = _mm_setzero_si128();
+
+        __m128i T00, T01, T02, T03;
+        __m128i T10, T11, T12, T13;
+        __m128i R00, R01, R02, R03;
+        __m128i T20;
+
+        for (int i = 0; i < ly; i += 8)
+        {
+            T00 = _mm_loadl_epi64((__m128i*)(fenc + (i + 0) * FENC_STRIDE));
+            T01 = _mm_loadl_epi64((__m128i*)(fenc + (i + 1) * FENC_STRIDE));
+            T01 = _mm_unpacklo_epi32(T00, T01);
+            T02 = _mm_loadl_epi64((__m128i*)(fenc + (i + 2) * FENC_STRIDE));
+            T03 = _mm_loadl_epi64((__m128i*)(fenc + (i + 3) * FENC_STRIDE));
+            T03 = _mm_unpacklo_epi32(T02, T03);
+            R00 = _mm_unpacklo_epi64(T01, T03);
+
+            T10 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 0) * frefstride));
+            T11 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 1) * frefstride));
+            T11 = _mm_unpacklo_epi32(T10, T11);
+            T12 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 2) * frefstride));
+            T13 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 3) * frefstride));
+            T13 = _mm_unpacklo_epi32(T12, T13);
+            R01 = _mm_unpacklo_epi64(T11, T13);
+
+            T10 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 0) * frefstride));
+            T11 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 1) * frefstride));
+            T11 = _mm_unpacklo_epi32(T10, T11);
+            T12 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 2) * frefstride));
+            T13 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 3) * frefstride));
+            T13 = _mm_unpacklo_epi32(T12, T13);
+            R02 = _mm_unpacklo_epi64(T11, T13);
+
+            T10 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 0) * frefstride));
+            T11 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 1) * frefstride));
+            T11 = _mm_unpacklo_epi32(T10, T11);
+            T12 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 2) * frefstride));
+            T13 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 3) * frefstride));
+            T13 = _mm_unpacklo_epi32(T12, T13);
+            R03 = _mm_unpacklo_epi64(T11, T13);
+
+            T20 = _mm_sad_epu8(R00, R01);
+            T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+            sum0 = _mm_add_epi32(sum0, T20);
+
+            T20 = _mm_sad_epu8(R00, R02);
+            T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+            sum1 = _mm_add_epi32(sum1, T20);
+
+            T20 = _mm_sad_epu8(R00, R03);
+            T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+            sum2 = _mm_add_epi32(sum2, T20);
+
+            T00 = _mm_loadl_epi64((__m128i*)(fenc + (i + 4) * FENC_STRIDE));
+            T01 = _mm_loadl_epi64((__m128i*)(fenc + (i + 5) * FENC_STRIDE));
+            T01 = _mm_unpacklo_epi32(T00, T01);
+            T02 = _mm_loadl_epi64((__m128i*)(fenc + (i + 6) * FENC_STRIDE));
+            T03 = _mm_loadl_epi64((__m128i*)(fenc + (i + 7) * FENC_STRIDE));
+            T03 = _mm_unpacklo_epi32(T02, T03);
+            R00 = _mm_unpacklo_epi64(T01, T03);
+
+            T10 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 4) * frefstride));
+            T11 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 5) * frefstride));
+            T11 = _mm_unpacklo_epi32(T10, T11);
+            T12 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 6) * frefstride));
+            T13 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 7) * frefstride));
+            T13 = _mm_unpacklo_epi32(T12, T13);
+            R01 = _mm_unpacklo_epi64(T11, T13);
+
+            T10 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 4) * frefstride));
+            T11 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 5) * frefstride));
+            T11 = _mm_unpacklo_epi32(T10, T11);
+            T12 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 6) * frefstride));
+            T13 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 7) * frefstride));
+            T13 = _mm_unpacklo_epi32(T12, T13);
+            R02 = _mm_unpacklo_epi64(T11, T13);
+
+            T10 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 4) * frefstride));
+            T11 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 5) * frefstride));
+            T11 = _mm_unpacklo_epi32(T10, T11);
+            T12 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 6) * frefstride));
+            T13 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 7) * frefstride));
+            T13 = _mm_unpacklo_epi32(T12, T13);
+            R03 = _mm_unpacklo_epi64(T11, T13);
+
+            T20 = _mm_sad_epu8(R00, R01);
+            T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+            sum0 = _mm_add_epi32(sum0, T20);
+
+            T20 = _mm_sad_epu8(R00, R02);
+            T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+            sum1 = _mm_add_epi32(sum1, T20);
+
+            T20 = _mm_sad_epu8(R00, R03);
+            T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+            sum2 = _mm_add_epi32(sum2, T20);
+        }
+
+        res[0] = _mm_cvtsi128_si32(sum0);
+        res[1] = _mm_cvtsi128_si32(sum1);
+        res[2] = _mm_cvtsi128_si32(sum2);
+    }
+    else
+    {
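+        // Remaining multiples of 4: four rows per iteration with MMX
+        // SADs; the row offsets are computed once per iteration.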
+        __m64 sum0 = _mm_setzero_si64();
+        __m64 sum1 = _mm_setzero_si64();
+        __m64 sum2 = _mm_setzero_si64();
+
+        __m64 T10, T11, T12, T13;
+        __m64 T00, T01, T02, T03;
+        __m64 T20, T21;
+
+        for (int i = 0; i < ly; i += 4)
+        {
+            int frefstrideZero = (i + 0) * frefstride;
+            int frefstrideOne = (i + 1) * frefstride;
+            int frefstrideTwo = (i + 2) * frefstride;
+            int frefstrideThree = (i + 3) * frefstride;
+
+            T00 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 0) * FENC_STRIDE));
+            T01 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 1) * FENC_STRIDE));
+            T00 = _mm_unpacklo_pi8(T00, T01);
+            T02 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 2) * FENC_STRIDE));
+            T03 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 3) * FENC_STRIDE));
+            T02 = _mm_unpacklo_pi8(T02, T03);
+
+            T10 = _mm_cvtsi32_si64(*(int*)(fref1 + frefstrideZero));
+            T11 = _mm_cvtsi32_si64(*(int*)(fref1 + frefstrideOne));
+            T10 = _mm_unpacklo_pi8(T10, T11);
+            T12 = _mm_cvtsi32_si64(*(int*)(fref1 + frefstrideTwo));
+            T13 = _mm_cvtsi32_si64(*(int*)(fref1 + frefstrideThree));
+            T12 = _mm_unpacklo_pi8(T12, T13);
+
+            T20 = _mm_sad_pu8(T00, T10);
+            T21 = _mm_sad_pu8(T02, T12);
+
+            sum0 = _mm_add_pi16(sum0, T20);
+            sum0 = _mm_add_pi16(sum0, T21);
+
+            T10 = _mm_cvtsi32_si64(*(int*)(fref2 + frefstrideZero));
+            T11 = _mm_cvtsi32_si64(*(int*)(fref2 + frefstrideOne));
+            T10 = _mm_unpacklo_pi8(T10, T11);
+            T12 = _mm_cvtsi32_si64(*(int*)(fref2 + frefstrideTwo));
+            T13 = _mm_cvtsi32_si64(*(int*)(fref2 + frefstrideThree));
+            T12 = _mm_unpacklo_pi8(T12, T13);
+
+            T20 = _mm_sad_pu8(T00, T10);
+            T21 = _mm_sad_pu8(T02, T12);
+
+            sum1 = _mm_add_pi16(sum1, T20);
+            sum1 = _mm_add_pi16(sum1, T21);
+
+            T10 = _mm_cvtsi32_si64(*(int*)(fref3 + frefstrideZero));
+            T11 = _mm_cvtsi32_si64(*(int*)(fref3 + frefstrideOne));
+            T10 = _mm_unpacklo_pi8(T10, T11);
+            T12 = _mm_cvtsi32_si64(*(int*)(fref3 + frefstrideTwo));
+            T13 = _mm_cvtsi32_si64(*(int*)(fref3 + frefstrideThree));
+            T12 = _mm_unpacklo_pi8(T12, T13);
+
+            T20 = _mm_sad_pu8(T00, T10);
+            T21 = _mm_sad_pu8(T02, T12);
+
+            sum2 = _mm_add_pi16(sum2, T20);
+            sum2 = _mm_add_pi16(sum2, T21);
+        }
+
+        res[0] = _m_to_int(sum0);
+        res[1] = _m_to_int(sum1);
+        res[2] = _m_to_int(sum2);
+    }
+}
+
+#else /* if HAVE_MMX */
+
+template<int ly>
+void sad_x3_4(pixel *fenc, pixel *fref1, pixel *fref2, pixel *fref3, intptr_t frefstride, int *res)
+{
+    assert((ly % 4) == 0);
+    __m128i sum0 = _mm_setzero_si128();
+    __m128i sum1 = _mm_setzero_si128();
+    __m128i sum2 = _mm_setzero_si128();
+
+    __m128i T00, T01, T02, T03;
+    __m128i T10, T11, T12, T13;
+    __m128i R00, R01, R02, R03;
+    __m128i T20;
+
+    if (ly == 4)
+    {
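+        // 4x4: gather all four 4-pixel rows of a block into one XMM
+        // register so a single _mm_sad_epu8 covers the whole block.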
+        T00 = _mm_loadl_epi64((__m128i*)(fenc + (0) * FENC_STRIDE));
+        T01 = _mm_loadl_epi64((__m128i*)(fenc + (1) * FENC_STRIDE));
+        T01 = _mm_unpacklo_epi32(T00, T01);
+        T02 = _mm_loadl_epi64((__m128i*)(fenc + (2) * FENC_STRIDE));
+        T03 = _mm_loadl_epi64((__m128i*)(fenc + (3) * FENC_STRIDE));
+        T03 = _mm_unpacklo_epi32(T02, T03);
+        R00 = _mm_unpacklo_epi64(T01, T03);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref1 + (0) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref1 + (1) * frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref1 + (2) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref1 + (3) * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R01 = _mm_unpacklo_epi64(T11, T13);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref2 + (0) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref2 + (1) * frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref2 + (2) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref2 + (3) * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R02 = _mm_unpacklo_epi64(T11, T13);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref3 + (0) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref3 + (1) * frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref3 + (2) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref3 + (3) * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R03 = _mm_unpacklo_epi64(T11, T13);
+
+        T20 = _mm_sad_epu8(R00, R01);
+        sum0 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+
+        T20 = _mm_sad_epu8(R00, R02);
+        sum1 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+
+        T20 = _mm_sad_epu8(R00, R03);
+        sum2 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+    }
+    else if (ly == 8)
+    {
+        T00 = _mm_loadl_epi64((__m128i*)(fenc + (0) * FENC_STRIDE));
+        T01 = _mm_loadl_epi64((__m128i*)(fenc + (1) * FENC_STRIDE));
+        T01 = _mm_unpacklo_epi32(T00, T01);
+        T02 = _mm_loadl_epi64((__m128i*)(fenc + (2) * FENC_STRIDE));
+        T03 = _mm_loadl_epi64((__m128i*)(fenc + (3) * FENC_STRIDE));
+        T03 = _mm_unpacklo_epi32(T02, T03);
+        R00 = _mm_unpacklo_epi64(T01, T03);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref1 + (0) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref1 + (1) * frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref1 + (2) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref1 + (3) * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R01 = _mm_unpacklo_epi64(T11, T13);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref2 + (0) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref2 + (1) * frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref2 + (2) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref2 + (3) * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R02 = _mm_unpacklo_epi64(T11, T13);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref3 + (0) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref3 + (1) * frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref3 + (2) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref3 + (3) * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R03 = _mm_unpacklo_epi64(T11, T13);
+
+        T20 = _mm_sad_epu8(R00, R01);
+        sum0 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+
+        T20 = _mm_sad_epu8(R00, R02);
+        sum1 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+
+        T20 = _mm_sad_epu8(R00, R03);
+        sum2 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+
+        T00 = _mm_loadl_epi64((__m128i*)(fenc + (4) * FENC_STRIDE));
+        T01 = _mm_loadl_epi64((__m128i*)(fenc + (5) * FENC_STRIDE));
+        T01 = _mm_unpacklo_epi32(T00, T01);
+        T02 = _mm_loadl_epi64((__m128i*)(fenc + (6) * FENC_STRIDE));
+        T03 = _mm_loadl_epi64((__m128i*)(fenc + (7) * FENC_STRIDE));
+        T03 = _mm_unpacklo_epi32(T02, T03);
+        R00 = _mm_unpacklo_epi64(T01, T03);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref1 + (4) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref1 + (5) * frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref1 + (6) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref1 + (7) * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R01 = _mm_unpacklo_epi64(T11, T13);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref2 + (4) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref2 + (5) * frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref2 + (6) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref2 + (7) * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R02 = _mm_unpacklo_epi64(T11, T13);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref3 + (4) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref3 + (5) * frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref3 + (6) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref3 + (7) * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R03 = _mm_unpacklo_epi64(T11, T13);
+
+        T20 = _mm_sad_epu8(R00, R01);
+        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+        sum0 = _mm_add_epi32(sum0, T20);
+
+        T20 = _mm_sad_epu8(R00, R02);
+        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+        sum1 = _mm_add_epi32(sum1, T20);
+
+        T20 = _mm_sad_epu8(R00, R03);
+        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+        sum2 = _mm_add_epi32(sum2, T20);
+    }
+    else if (ly == 16)
+    {
+        T00 = _mm_loadl_epi64((__m128i*)(fenc + (0) * FENC_STRIDE));
+        T01 = _mm_loadl_epi64((__m128i*)(fenc + (1) * FENC_STRIDE));
+        T01 = _mm_unpacklo_epi32(T00, T01);
+        T02 = _mm_loadl_epi64((__m128i*)(fenc + (2) * FENC_STRIDE));
+        T03 = _mm_loadl_epi64((__m128i*)(fenc + (3) * FENC_STRIDE));
+        T03 = _mm_unpacklo_epi32(T02, T03);
+        R00 = _mm_unpacklo_epi64(T01, T03);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref1 + (0) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref1 + (1) * frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref1 + (2) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref1 + (3) * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R01 = _mm_unpacklo_epi64(T11, T13);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref2 + (0) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref2 + (1) * frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref2 + (2) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref2 + (3) * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R02 = _mm_unpacklo_epi64(T11, T13);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref3 + (0) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref3 + (1) * frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref3 + (2) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref3 + (3) * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R03 = _mm_unpacklo_epi64(T11, T13);
+
+        T20 = _mm_sad_epu8(R00, R01);
+        sum0 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+
+        T20 = _mm_sad_epu8(R00, R02);
+        sum1 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+
+        T20 = _mm_sad_epu8(R00, R03);
+        sum2 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+
+        T00 = _mm_loadl_epi64((__m128i*)(fenc + (4) * FENC_STRIDE));
+        T01 = _mm_loadl_epi64((__m128i*)(fenc + (5) * FENC_STRIDE));
+        T01 = _mm_unpacklo_epi32(T00, T01);
+        T02 = _mm_loadl_epi64((__m128i*)(fenc + (6) * FENC_STRIDE));
+        T03 = _mm_loadl_epi64((__m128i*)(fenc + (7) * FENC_STRIDE));
+        T03 = _mm_unpacklo_epi32(T02, T03);
+        R00 = _mm_unpacklo_epi64(T01, T03);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref1 + (4) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref1 + (5) * frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref1 + (6) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref1 + (7) * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R01 = _mm_unpacklo_epi64(T11, T13);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref2 + (4) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref2 + (5) * frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref2 + (6) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref2 + (7) * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R02 = _mm_unpacklo_epi64(T11, T13);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref3 + (4) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref3 + (5) * frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref3 + (6) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref3 + (7) * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R03 = _mm_unpacklo_epi64(T11, T13);
+
+        T20 = _mm_sad_epu8(R00, R01);
+        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+        sum0 = _mm_add_epi32(sum0, T20);
+
+        T20 = _mm_sad_epu8(R00, R02);
+        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+        sum1 = _mm_add_epi32(sum1, T20);
+
+        T20 = _mm_sad_epu8(R00, R03);
+        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+        sum2 = _mm_add_epi32(sum2, T20);
+
+        T00 = _mm_loadl_epi64((__m128i*)(fenc + (8) * FENC_STRIDE));
+        T01 = _mm_loadl_epi64((__m128i*)(fenc + (9) * FENC_STRIDE));
+        T01 = _mm_unpacklo_epi32(T00, T01);
+        T02 = _mm_loadl_epi64((__m128i*)(fenc + (10) * FENC_STRIDE));
+        T03 = _mm_loadl_epi64((__m128i*)(fenc + (11) * FENC_STRIDE));
+        T03 = _mm_unpacklo_epi32(T02, T03);
+        R00 = _mm_unpacklo_epi64(T01, T03);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref1 + (8) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref1 + (9) * frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref1 + (10) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref1 + (11) * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R01 = _mm_unpacklo_epi64(T11, T13);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref2 + (8) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref2 + (9) * frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref2 + (10) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref2 + (11) * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R02 = _mm_unpacklo_epi64(T11, T13);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref3 + (8) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref3 + (9) * frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref3 + (10) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref3 + (11) * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R03 = _mm_unpacklo_epi64(T11, T13);
+
+        T20 = _mm_sad_epu8(R00, R01);
+        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+        sum0 = _mm_add_epi32(sum0, T20);
+
+        T20 = _mm_sad_epu8(R00, R02);
+        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+        sum1 = _mm_add_epi32(sum1, T20);
+
+        T20 = _mm_sad_epu8(R00, R03);
+        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+        sum2 = _mm_add_epi32(sum2, T20);
+
+        T00 = _mm_loadl_epi64((__m128i*)(fenc + (12) * FENC_STRIDE));
+        T01 = _mm_loadl_epi64((__m128i*)(fenc + (13) * FENC_STRIDE));
+        T01 = _mm_unpacklo_epi32(T00, T01);
+        T02 = _mm_loadl_epi64((__m128i*)(fenc + (14) * FENC_STRIDE));
+        T03 = _mm_loadl_epi64((__m128i*)(fenc + (15) * FENC_STRIDE));
+        T03 = _mm_unpacklo_epi32(T02, T03);
+        R00 = _mm_unpacklo_epi64(T01, T03);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref1 + (12) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref1 + (13) * frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref1 + (14) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref1 + (15) * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R01 = _mm_unpacklo_epi64(T11, T13);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref2 + (12) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref2 + (13) * frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref2 + (14) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref2 + (15) * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R02 = _mm_unpacklo_epi64(T11, T13);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref3 + (12) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref3 + (13) * frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref3 + (14) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref3 + (15) * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R03 = _mm_unpacklo_epi64(T11, T13);
+
+        T20 = _mm_sad_epu8(R00, R01);
+        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+        sum0 = _mm_add_epi32(sum0, T20);
+
+        T20 = _mm_sad_epu8(R00, R02);
+        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+        sum1 = _mm_add_epi32(sum1, T20);
+
+        T20 = _mm_sad_epu8(R00, R03);
+        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+        sum2 = _mm_add_epi32(sum2, T20);
+    }
+    else if ((ly % 8) == 0)
+    {
+        for (int i = 0; i < ly; i += 8)
+        {
+            T00 = _mm_loadl_epi64((__m128i*)(fenc + (i + 0) * FENC_STRIDE));
+            T01 = _mm_loadl_epi64((__m128i*)(fenc + (i + 1) * FENC_STRIDE));
+            T01 = _mm_unpacklo_epi32(T00, T01);
+            T02 = _mm_loadl_epi64((__m128i*)(fenc + (i + 2) * FENC_STRIDE));
+            T03 = _mm_loadl_epi64((__m128i*)(fenc + (i + 3) * FENC_STRIDE));
+            T03 = _mm_unpacklo_epi32(T02, T03);
+            R00 = _mm_unpacklo_epi64(T01, T03);
+
+            T10 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 0) * frefstride));
+            T11 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 1) * frefstride));
+            T11 = _mm_unpacklo_epi32(T10, T11);
+            T12 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 2) * frefstride));
+            T13 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 3) * frefstride));
+            T13 = _mm_unpacklo_epi32(T12, T13);
+            R01 = _mm_unpacklo_epi64(T11, T13);
+
+            T10 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 0) * frefstride));
+            T11 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 1) * frefstride));
+            T11 = _mm_unpacklo_epi32(T10, T11);
+            T12 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 2) * frefstride));
+            T13 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 3) * frefstride));
+            T13 = _mm_unpacklo_epi32(T12, T13);
+            R02 = _mm_unpacklo_epi64(T11, T13);
+
+            T10 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 0) * frefstride));
+            T11 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 1) * frefstride));
+            T11 = _mm_unpacklo_epi32(T10, T11);
+            T12 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 2) * frefstride));
+            T13 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 3) * frefstride));
+            T13 = _mm_unpacklo_epi32(T12, T13);
+            R03 = _mm_unpacklo_epi64(T11, T13);
+
+            T20 = _mm_sad_epu8(R00, R01);
+            T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+            sum0 = _mm_add_epi32(sum0, T20);
+
+            T20 = _mm_sad_epu8(R00, R02);
+            T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+            sum1 = _mm_add_epi32(sum1, T20);
+
+            T20 = _mm_sad_epu8(R00, R03);
+            T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+            sum2 = _mm_add_epi32(sum2, T20);
+
+            T00 = _mm_loadl_epi64((__m128i*)(fenc + (i + 4) * FENC_STRIDE));
+            T01 = _mm_loadl_epi64((__m128i*)(fenc + (i + 5) * FENC_STRIDE));
+            T01 = _mm_unpacklo_epi32(T00, T01);
+            T02 = _mm_loadl_epi64((__m128i*)(fenc + (i + 6) * FENC_STRIDE));
+            T03 = _mm_loadl_epi64((__m128i*)(fenc + (i + 7) * FENC_STRIDE));
+            T03 = _mm_unpacklo_epi32(T02, T03);
+            R00 = _mm_unpacklo_epi64(T01, T03);
+
+            T10 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 4) * frefstride));
+            T11 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 5) * frefstride));
+            T11 = _mm_unpacklo_epi32(T10, T11);
+            T12 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 6) * frefstride));
+            T13 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 7) * frefstride));
+            T13 = _mm_unpacklo_epi32(T12, T13);
+            R01 = _mm_unpacklo_epi64(T11, T13);
+
+            T10 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 4) * frefstride));
+            T11 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 5) * frefstride));
+            T11 = _mm_unpacklo_epi32(T10, T11);
+            T12 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 6) * frefstride));
+            T13 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 7) * frefstride));
+            T13 = _mm_unpacklo_epi32(T12, T13);
+            R02 = _mm_unpacklo_epi64(T11, T13);
+
+            T10 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 4) * frefstride));
+            T11 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 5) * frefstride));
+            T11 = _mm_unpacklo_epi32(T10, T11);
+            T12 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 6) * frefstride));
+            T13 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 7) * frefstride));
+            T13 = _mm_unpacklo_epi32(T12, T13);
+            R03 = _mm_unpacklo_epi64(T11, T13);
+
+            T20 = _mm_sad_epu8(R00, R01);
+            T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+            sum0 = _mm_add_epi32(sum0, T20);
+
+            T20 = _mm_sad_epu8(R00, R02);
+            T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+            sum1 = _mm_add_epi32(sum1, T20);
+
+            T20 = _mm_sad_epu8(R00, R03);
+            T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+            sum2 = _mm_add_epi32(sum2, T20);
+        }
+    }
+    else
+    {
+        for (int i = 0; i < ly; i += 4)
+        {
+            T00 = _mm_loadl_epi64((__m128i*)(fenc + (i + 0) * FENC_STRIDE));
+            T01 = _mm_loadl_epi64((__m128i*)(fenc + (i + 1) * FENC_STRIDE));
+            T01 = _mm_unpacklo_epi32(T00, T01);
+            T02 = _mm_loadl_epi64((__m128i*)(fenc + (i + 2) * FENC_STRIDE));
+            T03 = _mm_loadl_epi64((__m128i*)(fenc + (i + 3) * FENC_STRIDE));
+            T03 = _mm_unpacklo_epi32(T02, T03);
+            R00 = _mm_unpacklo_epi64(T01, T03);
+
+            T10 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 0) * frefstride));
+            T11 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 1) * frefstride));
+            T11 = _mm_unpacklo_epi32(T10, T11);
+            T12 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 2) * frefstride));
+            T13 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 3) * frefstride));
+            T13 = _mm_unpacklo_epi32(T12, T13);
+            R01 = _mm_unpacklo_epi64(T11, T13);
+
+            T10 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 0) * frefstride));
+            T11 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 1) * frefstride));
+            T11 = _mm_unpacklo_epi32(T10, T11);
+            T12 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 2) * frefstride));
+            T13 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 3) * frefstride));
+            T13 = _mm_unpacklo_epi32(T12, T13);
+            R02 = _mm_unpacklo_epi64(T11, T13);
+
+            T10 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 0) * frefstride));
+            T11 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 1) * frefstride));
+            T11 = _mm_unpacklo_epi32(T10, T11);
+            T12 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 2) * frefstride));
+            T13 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 3) * frefstride));
+            T13 = _mm_unpacklo_epi32(T12, T13);
+            R03 = _mm_unpacklo_epi64(T11, T13);
+
+            T20 = _mm_sad_epu8(R00, R01);
+            T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+            sum0 = _mm_add_epi32(sum0, T20);
+
+            T20 = _mm_sad_epu8(R00, R02);
+            T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+            sum1 = _mm_add_epi32(sum1, T20);
+
+            T20 = _mm_sad_epu8(R00, R03);
+            T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+            sum2 = _mm_add_epi32(sum2, T20);
+        }
+    }
+
+    res[0] = _mm_cvtsi128_si32(sum0);
+    res[1] = _mm_cvtsi128_si32(sum1);
+    res[2] = _mm_cvtsi128_si32(sum2);
+}
+
+#endif /* if HAVE_MMX */
+
+#if HAVE_MMX
+template<int ly>
+void sad_x3_8(pixel *fenc, pixel *fref1, pixel *fref2, pixel *fref3, intptr_t frefstride, int *res)
+{
+    assert((ly % 4) == 0);
+
     __m64 sum0 = _mm_setzero_si64();
     __m64 sum1 = _mm_setzero_si64();
     __m64 sum2 = _mm_setzero_si64();
@@ -1343,6 +2333,2062 @@
 
     if (ly == 4)
     {
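+        // 8-pixel rows load directly as __m64; one _mm_sad_pu8 per row.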
+        T00 = (*(__m64*)(fenc + 0 * FENC_STRIDE));
+        T01 = (*(__m64*)(fenc + 1 * FENC_STRIDE));
+        T02 = (*(__m64*)(fenc + 2 * FENC_STRIDE));
+        T03 = (*(__m64*)(fenc + 3 * FENC_STRIDE));
+
+        T10 = (*(__m64*)(fref1 + 0 * frefstride));
+        T11 = (*(__m64*)(fref1 + 1 * frefstride));
+        T12 = (*(__m64*)(fref1 + 2 * frefstride));
+        T13 = (*(__m64*)(fref1 + 3 * frefstride));
+
+        T20 = _mm_sad_pu8(T00, T10);
+        T21 = _mm_sad_pu8(T01, T11);
+        T22 = _mm_sad_pu8(T02, T12);
+        T23 = _mm_sad_pu8(T03, T13);
+
+        sum0 = _mm_add_pi16(sum0, T20);
+        sum0 = _mm_add_pi16(sum0, T21);
+        sum0 = _mm_add_pi16(sum0, T22);
+        sum0 = _mm_add_pi16(sum0, T23);
+
+        T10 = (*(__m64*)(fref2 + 0 * frefstride));
+        T11 = (*(__m64*)(fref2 + 1 * frefstride));
+        T12 = (*(__m64*)(fref2 + 2 * frefstride));
+        T13 = (*(__m64*)(fref2 + 3 * frefstride));
+
+        T20 = _mm_sad_pu8(T00, T10);
+        T21 = _mm_sad_pu8(T01, T11);
+        T22 = _mm_sad_pu8(T02, T12);
+        T23 = _mm_sad_pu8(T03, T13);
+
+        sum1 = _mm_add_pi16(sum1, T20);
+        sum1 = _mm_add_pi16(sum1, T21);
+        sum1 = _mm_add_pi16(sum1, T22);
+        sum1 = _mm_add_pi16(sum1, T23);
+
+        T10 = (*(__m64*)(fref3 + 0 * frefstride));
+        T11 = (*(__m64*)(fref3 + 1 * frefstride));
+        T12 = (*(__m64*)(fref3 + 2 * frefstride));
+        T13 = (*(__m64*)(fref3 + 3 * frefstride));
+
+        T20 = _mm_sad_pu8(T00, T10);
+        T21 = _mm_sad_pu8(T01, T11);
+        T22 = _mm_sad_pu8(T02, T12);
+        T23 = _mm_sad_pu8(T03, T13);
+
+        sum2 = _mm_add_pi16(sum2, T20);
+        sum2 = _mm_add_pi16(sum2, T21);
+        sum2 = _mm_add_pi16(sum2, T22);
+        sum2 = _mm_add_pi16(sum2, T23);
+    }
+    else if (ly == 8)
+    {
+        T00 = (*(__m64*)(fenc + 0 * FENC_STRIDE));
+        T01 = (*(__m64*)(fenc + 1 * FENC_STRIDE));
+        T02 = (*(__m64*)(fenc + 2 * FENC_STRIDE));
+        T03 = (*(__m64*)(fenc + 3 * FENC_STRIDE));
+        T04 = (*(__m64*)(fenc + 4 * FENC_STRIDE));
+        T05 = (*(__m64*)(fenc + 5 * FENC_STRIDE));
+        T06 = (*(__m64*)(fenc + 6 * FENC_STRIDE));
+        T07 = (*(__m64*)(fenc + 7 * FENC_STRIDE));
+
+        T10 = (*(__m64*)(fref1 + 0 * frefstride));
+        T11 = (*(__m64*)(fref1 + 1 * frefstride));
+        T12 = (*(__m64*)(fref1 + 2 * frefstride));
+        T13 = (*(__m64*)(fref1 + 3 * frefstride));
+        T14 = (*(__m64*)(fref1 + 4 * frefstride));
+        T15 = (*(__m64*)(fref1 + 5 * frefstride));
+        T16 = (*(__m64*)(fref1 + 6 * frefstride));
+        T17 = (*(__m64*)(fref1 + 7 * frefstride));
+
+        T20 = _mm_sad_pu8(T00, T10);
+        T21 = _mm_sad_pu8(T01, T11);
+        T22 = _mm_sad_pu8(T02, T12);
+        T23 = _mm_sad_pu8(T03, T13);
+        T24 = _mm_sad_pu8(T04, T14);
+        T25 = _mm_sad_pu8(T05, T15);
+        T26 = _mm_sad_pu8(T06, T16);
+        T27 = _mm_sad_pu8(T07, T17);
+
+        sum0 = _mm_add_pi16(sum0, T20);
+        sum0 = _mm_add_pi16(sum0, T21);
+        sum0 = _mm_add_pi16(sum0, T22);
+        sum0 = _mm_add_pi16(sum0, T23);
+        sum0 = _mm_add_pi16(sum0, T24);
+        sum0 = _mm_add_pi16(sum0, T25);
+        sum0 = _mm_add_pi16(sum0, T26);
+        sum0 = _mm_add_pi16(sum0, T27);
+
+        T10 = (*(__m64*)(fref2 + 0 * frefstride));
+        T11 = (*(__m64*)(fref2 + 1 * frefstride));
+        T12 = (*(__m64*)(fref2 + 2 * frefstride));
+        T13 = (*(__m64*)(fref2 + 3 * frefstride));
+        T14 = (*(__m64*)(fref2 + 4 * frefstride));
+        T15 = (*(__m64*)(fref2 + 5 * frefstride));
+        T16 = (*(__m64*)(fref2 + 6 * frefstride));
+        T17 = (*(__m64*)(fref2 + 7 * frefstride));
+
+        T20 = _mm_sad_pu8(T00, T10);
+        T21 = _mm_sad_pu8(T01, T11);
+        T22 = _mm_sad_pu8(T02, T12);
+        T23 = _mm_sad_pu8(T03, T13);
+        T24 = _mm_sad_pu8(T04, T14);
+        T25 = _mm_sad_pu8(T05, T15);
+        T26 = _mm_sad_pu8(T06, T16);
+        T27 = _mm_sad_pu8(T07, T17);
+
+        sum1 = _mm_add_pi16(sum1, T20);
+        sum1 = _mm_add_pi16(sum1, T21);
+        sum1 = _mm_add_pi16(sum1, T22);
+        sum1 = _mm_add_pi16(sum1, T23);
+        sum1 = _mm_add_pi16(sum1, T24);
+        sum1 = _mm_add_pi16(sum1, T25);
+        sum1 = _mm_add_pi16(sum1, T26);
+        sum1 = _mm_add_pi16(sum1, T27);
+
+        T10 = (*(__m64*)(fref3 + 0 * frefstride));
+        T11 = (*(__m64*)(fref3 + 1 * frefstride));
+        T12 = (*(__m64*)(fref3 + 2 * frefstride));
+        T13 = (*(__m64*)(fref3 + 3 * frefstride));
+        T14 = (*(__m64*)(fref3 + 4 * frefstride));
+        T15 = (*(__m64*)(fref3 + 5 * frefstride));
+        T16 = (*(__m64*)(fref3 + 6 * frefstride));
+        T17 = (*(__m64*)(fref3 + 7 * frefstride));
+
+        T20 = _mm_sad_pu8(T00, T10);
+        T21 = _mm_sad_pu8(T01, T11);
+        T22 = _mm_sad_pu8(T02, T12);
+        T23 = _mm_sad_pu8(T03, T13);
+        T24 = _mm_sad_pu8(T04, T14);
+        T25 = _mm_sad_pu8(T05, T15);
+        T26 = _mm_sad_pu8(T06, T16);
+        T27 = _mm_sad_pu8(T07, T17);
+
+        sum2 = _mm_add_pi16(sum2, T20);
+        sum2 = _mm_add_pi16(sum2, T21);
+        sum2 = _mm_add_pi16(sum2, T22);
+        sum2 = _mm_add_pi16(sum2, T23);
+        sum2 = _mm_add_pi16(sum2, T24);
+        sum2 = _mm_add_pi16(sum2, T25);
+        sum2 = _mm_add_pi16(sum2, T26);
+        sum2 = _mm_add_pi16(sum2, T27);
+    }
+    else if (ly == 16)
+    {
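+        // ly == 16: keep all sixteen encoder rows resident (T00-T07 and
+        // T0-T7) and reuse them against each of the three references.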
+        T00 = (*(__m64*)(fenc + 0 * FENC_STRIDE));
+        T01 = (*(__m64*)(fenc + 1 * FENC_STRIDE));
+        T02 = (*(__m64*)(fenc + 2 * FENC_STRIDE));
+        T03 = (*(__m64*)(fenc + 3 * FENC_STRIDE));
+        T04 = (*(__m64*)(fenc + 4 * FENC_STRIDE));
+        T05 = (*(__m64*)(fenc + 5 * FENC_STRIDE));
+        T06 = (*(__m64*)(fenc + 6 * FENC_STRIDE));
+        T07 = (*(__m64*)(fenc + 7 * FENC_STRIDE));
+        T0 = (*(__m64*)(fenc + 8 * FENC_STRIDE));
+        T1 = (*(__m64*)(fenc + 9 * FENC_STRIDE));
+        T2 = (*(__m64*)(fenc + 10 * FENC_STRIDE));
+        T3 = (*(__m64*)(fenc + 11 * FENC_STRIDE));
+        T4 = (*(__m64*)(fenc + 12 * FENC_STRIDE));
+        T5 = (*(__m64*)(fenc + 13 * FENC_STRIDE));
+        T6 = (*(__m64*)(fenc + 14 * FENC_STRIDE));
+        T7 = (*(__m64*)(fenc + 15 * FENC_STRIDE));
+
+        T10 = (*(__m64*)(fref1 + 0 * frefstride));
+        T11 = (*(__m64*)(fref1 + 1 * frefstride));
+        T12 = (*(__m64*)(fref1 + 2 * frefstride));
+        T13 = (*(__m64*)(fref1 + 3 * frefstride));
+        T14 = (*(__m64*)(fref1 + 4 * frefstride));
+        T15 = (*(__m64*)(fref1 + 5 * frefstride));
+        T16 = (*(__m64*)(fref1 + 6 * frefstride));
+        T17 = (*(__m64*)(fref1 + 7 * frefstride));
+
+        T20 = _mm_sad_pu8(T00, T10);
+        T21 = _mm_sad_pu8(T01, T11);
+        T22 = _mm_sad_pu8(T02, T12);
+        T23 = _mm_sad_pu8(T03, T13);
+        T24 = _mm_sad_pu8(T04, T14);
+        T25 = _mm_sad_pu8(T05, T15);
+        T26 = _mm_sad_pu8(T06, T16);
+        T27 = _mm_sad_pu8(T07, T17);
+
+        sum0 = _mm_add_pi16(sum0, T20);
+        sum0 = _mm_add_pi16(sum0, T21);
+        sum0 = _mm_add_pi16(sum0, T22);
+        sum0 = _mm_add_pi16(sum0, T23);
+        sum0 = _mm_add_pi16(sum0, T24);
+        sum0 = _mm_add_pi16(sum0, T25);
+        sum0 = _mm_add_pi16(sum0, T26);
+        sum0 = _mm_add_pi16(sum0, T27);
+
+        T10 = (*(__m64*)(fref1 + 8 * frefstride));
+        T11 = (*(__m64*)(fref1 + 9 * frefstride));
+        T12 = (*(__m64*)(fref1 + 10 * frefstride));
+        T13 = (*(__m64*)(fref1 + 11 * frefstride));
+        T14 = (*(__m64*)(fref1 + 12 * frefstride));
+        T15 = (*(__m64*)(fref1 + 13 * frefstride));
+        T16 = (*(__m64*)(fref1 + 14 * frefstride));
+        T17 = (*(__m64*)(fref1 + 15 * frefstride));
+
+        T20 = _mm_sad_pu8(T0, T10);
+        T21 = _mm_sad_pu8(T1, T11);
+        T22 = _mm_sad_pu8(T2, T12);
+        T23 = _mm_sad_pu8(T3, T13);
+        T24 = _mm_sad_pu8(T4, T14);
+        T25 = _mm_sad_pu8(T5, T15);
+        T26 = _mm_sad_pu8(T6, T16);
+        T27 = _mm_sad_pu8(T7, T17);
+
+        sum0 = _mm_add_pi16(sum0, T20);
+        sum0 = _mm_add_pi16(sum0, T21);
+        sum0 = _mm_add_pi16(sum0, T22);
+        sum0 = _mm_add_pi16(sum0, T23);
+        sum0 = _mm_add_pi16(sum0, T24);
+        sum0 = _mm_add_pi16(sum0, T25);
+        sum0 = _mm_add_pi16(sum0, T26);
+        sum0 = _mm_add_pi16(sum0, T27);
+
+        T10 = (*(__m64*)(fref2 + 0 * frefstride));
+        T11 = (*(__m64*)(fref2 + 1 * frefstride));
+        T12 = (*(__m64*)(fref2 + 2 * frefstride));
+        T13 = (*(__m64*)(fref2 + 3 * frefstride));
+        T14 = (*(__m64*)(fref2 + 4 * frefstride));
+        T15 = (*(__m64*)(fref2 + 5 * frefstride));
+        T16 = (*(__m64*)(fref2 + 6 * frefstride));
+        T17 = (*(__m64*)(fref2 + 7 * frefstride));
+
+        T20 = _mm_sad_pu8(T00, T10);
+        T21 = _mm_sad_pu8(T01, T11);
+        T22 = _mm_sad_pu8(T02, T12);
+        T23 = _mm_sad_pu8(T03, T13);
+        T24 = _mm_sad_pu8(T04, T14);
+        T25 = _mm_sad_pu8(T05, T15);
+        T26 = _mm_sad_pu8(T06, T16);
+        T27 = _mm_sad_pu8(T07, T17);
+
+        sum1 = _mm_add_pi16(sum1, T20);
+        sum1 = _mm_add_pi16(sum1, T21);
+        sum1 = _mm_add_pi16(sum1, T22);
+        sum1 = _mm_add_pi16(sum1, T23);
+        sum1 = _mm_add_pi16(sum1, T24);
+        sum1 = _mm_add_pi16(sum1, T25);
+        sum1 = _mm_add_pi16(sum1, T26);
+        sum1 = _mm_add_pi16(sum1, T27);
+
+        T10 = (*(__m64*)(fref2 + 8 * frefstride));
+        T11 = (*(__m64*)(fref2 + 9 * frefstride));
+        T12 = (*(__m64*)(fref2 + 10 * frefstride));
+        T13 = (*(__m64*)(fref2 + 11 * frefstride));
+        T14 = (*(__m64*)(fref2 + 12 * frefstride));
+        T15 = (*(__m64*)(fref2 + 13 * frefstride));
+        T16 = (*(__m64*)(fref2 + 14 * frefstride));
+        T17 = (*(__m64*)(fref2 + 15 * frefstride));
+
+        T20 = _mm_sad_pu8(T0, T10);
+        T21 = _mm_sad_pu8(T1, T11);
+        T22 = _mm_sad_pu8(T2, T12);
+        T23 = _mm_sad_pu8(T3, T13);
+        T24 = _mm_sad_pu8(T4, T14);
+        T25 = _mm_sad_pu8(T5, T15);
+        T26 = _mm_sad_pu8(T6, T16);
+        T27 = _mm_sad_pu8(T7, T17);
+
+        sum1 = _mm_add_pi16(sum1, T20);
+        sum1 = _mm_add_pi16(sum1, T21);
+        sum1 = _mm_add_pi16(sum1, T22);
+        sum1 = _mm_add_pi16(sum1, T23);
+        sum1 = _mm_add_pi16(sum1, T24);
+        sum1 = _mm_add_pi16(sum1, T25);
+        sum1 = _mm_add_pi16(sum1, T26);
+        sum1 = _mm_add_pi16(sum1, T27);
+
+        T10 = (*(__m64*)(fref3 + 0 * frefstride));
+        T11 = (*(__m64*)(fref3 + 1 * frefstride));
+        T12 = (*(__m64*)(fref3 + 2 * frefstride));
+        T13 = (*(__m64*)(fref3 + 3 * frefstride));
+        T14 = (*(__m64*)(fref3 + 4 * frefstride));
+        T15 = (*(__m64*)(fref3 + 5 * frefstride));
+        T16 = (*(__m64*)(fref3 + 6 * frefstride));
+        T17 = (*(__m64*)(fref3 + 7 * frefstride));
+
+        T20 = _mm_sad_pu8(T00, T10);
+        T21 = _mm_sad_pu8(T01, T11);
+        T22 = _mm_sad_pu8(T02, T12);
+        T23 = _mm_sad_pu8(T03, T13);
+        T24 = _mm_sad_pu8(T04, T14);
+        T25 = _mm_sad_pu8(T05, T15);
+        T26 = _mm_sad_pu8(T06, T16);
+        T27 = _mm_sad_pu8(T07, T17);
+
+        sum2 = _mm_add_pi16(sum2, T20);
+        sum2 = _mm_add_pi16(sum2, T21);
+        sum2 = _mm_add_pi16(sum2, T22);
+        sum2 = _mm_add_pi16(sum2, T23);
+        sum2 = _mm_add_pi16(sum2, T24);
+        sum2 = _mm_add_pi16(sum2, T25);
+        sum2 = _mm_add_pi16(sum2, T26);
+        sum2 = _mm_add_pi16(sum2, T27);
+
+        T10 = (*(__m64*)(fref3 + 8 * frefstride));
+        T11 = (*(__m64*)(fref3 + 9 * frefstride));
+        T12 = (*(__m64*)(fref3 + 10 * frefstride));
+        T13 = (*(__m64*)(fref3 + 11 * frefstride));
+        T14 = (*(__m64*)(fref3 + 12 * frefstride));
+        T15 = (*(__m64*)(fref3 + 13 * frefstride));
+        T16 = (*(__m64*)(fref3 + 14 * frefstride));
+        T17 = (*(__m64*)(fref3 + 15 * frefstride));
+
+        T20 = _mm_sad_pu8(T0, T10);
+        T21 = _mm_sad_pu8(T1, T11);
+        T22 = _mm_sad_pu8(T2, T12);
+        T23 = _mm_sad_pu8(T3, T13);
+        T24 = _mm_sad_pu8(T4, T14);
+        T25 = _mm_sad_pu8(T5, T15);
+        T26 = _mm_sad_pu8(T6, T16);
+        T27 = _mm_sad_pu8(T7, T17);
+
+        sum2 = _mm_add_pi16(sum2, T20);
+        sum2 = _mm_add_pi16(sum2, T21);
+        sum2 = _mm_add_pi16(sum2, T22);
+        sum2 = _mm_add_pi16(sum2, T23);
+        sum2 = _mm_add_pi16(sum2, T24);
+        sum2 = _mm_add_pi16(sum2, T25);
+        sum2 = _mm_add_pi16(sum2, T26);
+        sum2 = _mm_add_pi16(sum2, T27);
+    }
+    else if ((ly % 8) == 0)
+    {
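+        /* generic path: accumulate eight rows per iteration */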
+        for (int i = 0; i < ly; i += 8)
+        {
+            T00 = (*(__m64*)(fenc + (i + 0) * FENC_STRIDE));
+            T01 = (*(__m64*)(fenc + (i + 1) * FENC_STRIDE));
+            T02 = (*(__m64*)(fenc + (i + 2) * FENC_STRIDE));
+            T03 = (*(__m64*)(fenc + (i + 3) * FENC_STRIDE));
+            T04 = (*(__m64*)(fenc + (i + 4) * FENC_STRIDE));
+            T05 = (*(__m64*)(fenc + (i + 5) * FENC_STRIDE));
+            T06 = (*(__m64*)(fenc + (i + 6) * FENC_STRIDE));
+            T07 = (*(__m64*)(fenc + (i + 7) * FENC_STRIDE));
+
+            T10 = (*(__m64*)(fref1 + (i + 0) * frefstride));
+            T11 = (*(__m64*)(fref1 + (i + 1) * frefstride));
+            T12 = (*(__m64*)(fref1 + (i + 2) * frefstride));
+            T13 = (*(__m64*)(fref1 + (i + 3) * frefstride));
+            T14 = (*(__m64*)(fref1 + (i + 4) * frefstride));
+            T15 = (*(__m64*)(fref1 + (i + 5) * frefstride));
+            T16 = (*(__m64*)(fref1 + (i + 6) * frefstride));
+            T17 = (*(__m64*)(fref1 + (i + 7) * frefstride));
+
+            T20 = _mm_sad_pu8(T00, T10);
+            T21 = _mm_sad_pu8(T01, T11);
+            T22 = _mm_sad_pu8(T02, T12);
+            T23 = _mm_sad_pu8(T03, T13);
+            T24 = _mm_sad_pu8(T04, T14);
+            T25 = _mm_sad_pu8(T05, T15);
+            T26 = _mm_sad_pu8(T06, T16);
+            T27 = _mm_sad_pu8(T07, T17);
+
+            sum0 = _mm_add_pi16(sum0, T20);
+            sum0 = _mm_add_pi16(sum0, T21);
+            sum0 = _mm_add_pi16(sum0, T22);
+            sum0 = _mm_add_pi16(sum0, T23);
+            sum0 = _mm_add_pi16(sum0, T24);
+            sum0 = _mm_add_pi16(sum0, T25);
+            sum0 = _mm_add_pi16(sum0, T26);
+            sum0 = _mm_add_pi16(sum0, T27);
+
+            T10 = (*(__m64*)(fref2 + (i + 0) * frefstride));
+            T11 = (*(__m64*)(fref2 + (i + 1) * frefstride));
+            T12 = (*(__m64*)(fref2 + (i + 2) * frefstride));
+            T13 = (*(__m64*)(fref2 + (i + 3) * frefstride));
+            T14 = (*(__m64*)(fref2 + (i + 4) * frefstride));
+            T15 = (*(__m64*)(fref2 + (i + 5) * frefstride));
+            T16 = (*(__m64*)(fref2 + (i + 6) * frefstride));
+            T17 = (*(__m64*)(fref2 + (i + 7) * frefstride));
+
+            T20 = _mm_sad_pu8(T00, T10);
+            T21 = _mm_sad_pu8(T01, T11);
+            T22 = _mm_sad_pu8(T02, T12);
+            T23 = _mm_sad_pu8(T03, T13);
+            T24 = _mm_sad_pu8(T04, T14);
+            T25 = _mm_sad_pu8(T05, T15);
+            T26 = _mm_sad_pu8(T06, T16);
+            T27 = _mm_sad_pu8(T07, T17);
+
+            sum1 = _mm_add_pi16(sum1, T20);
+            sum1 = _mm_add_pi16(sum1, T21);
+            sum1 = _mm_add_pi16(sum1, T22);
+            sum1 = _mm_add_pi16(sum1, T23);
+            sum1 = _mm_add_pi16(sum1, T24);
+            sum1 = _mm_add_pi16(sum1, T25);
+            sum1 = _mm_add_pi16(sum1, T26);
+            sum1 = _mm_add_pi16(sum1, T27);
+
+            T10 = (*(__m64*)(fref3 + (i + 0) * frefstride));
+            T11 = (*(__m64*)(fref3 + (i + 1) * frefstride));
+            T12 = (*(__m64*)(fref3 + (i + 2) * frefstride));
+            T13 = (*(__m64*)(fref3 + (i + 3) * frefstride));
+            T14 = (*(__m64*)(fref3 + (i + 4) * frefstride));
+            T15 = (*(__m64*)(fref3 + (i + 5) * frefstride));
+            T16 = (*(__m64*)(fref3 + (i + 6) * frefstride));
+            T17 = (*(__m64*)(fref3 + (i + 7) * frefstride));
+
+            T20 = _mm_sad_pu8(T00, T10);
+            T21 = _mm_sad_pu8(T01, T11);
+            T22 = _mm_sad_pu8(T02, T12);
+            T23 = _mm_sad_pu8(T03, T13);
+            T24 = _mm_sad_pu8(T04, T14);
+            T25 = _mm_sad_pu8(T05, T15);
+            T26 = _mm_sad_pu8(T06, T16);
+            T27 = _mm_sad_pu8(T07, T17);
+
+            sum2 = _mm_add_pi16(sum2, T20);
+            sum2 = _mm_add_pi16(sum2, T21);
+            sum2 = _mm_add_pi16(sum2, T22);
+            sum2 = _mm_add_pi16(sum2, T23);
+            sum2 = _mm_add_pi16(sum2, T24);
+            sum2 = _mm_add_pi16(sum2, T25);
+            sum2 = _mm_add_pi16(sum2, T26);
+            sum2 = _mm_add_pi16(sum2, T27);
+        }
+    }
+    else
+    {
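+        /* fallback for any remaining multiple of four rows */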
+        for (int i = 0; i < ly; i += 4)
+        {
+            T00 = (*(__m64*)(fenc + (i + 0) * FENC_STRIDE));
+            T01 = (*(__m64*)(fenc + (i + 1) * FENC_STRIDE));
+            T02 = (*(__m64*)(fenc + (i + 2) * FENC_STRIDE));
+            T03 = (*(__m64*)(fenc + (i + 3) * FENC_STRIDE));
+
+            T10 = (*(__m64*)(fref1 + (i + 0) * frefstride));
+            T11 = (*(__m64*)(fref1 + (i + 1) * frefstride));
+            T12 = (*(__m64*)(fref1 + (i + 2) * frefstride));
+            T13 = (*(__m64*)(fref1 + (i + 3) * frefstride));
+
+            T20 = _mm_sad_pu8(T00, T10);
+            T21 = _mm_sad_pu8(T01, T11);
+            T22 = _mm_sad_pu8(T02, T12);
+            T23 = _mm_sad_pu8(T03, T13);
+
+            sum0 = _mm_add_pi16(sum0, T20);
+            sum0 = _mm_add_pi16(sum0, T21);
+            sum0 = _mm_add_pi16(sum0, T22);
+            sum0 = _mm_add_pi16(sum0, T23);
+
+            T10 = (*(__m64*)(fref2 + (i + 0) * frefstride));
+            T11 = (*(__m64*)(fref2 + (i + 1) * frefstride));
+            T12 = (*(__m64*)(fref2 + (i + 2) * frefstride));
+            T13 = (*(__m64*)(fref2 + (i + 3) * frefstride));
+
+            T20 = _mm_sad_pu8(T00, T10);
+            T21 = _mm_sad_pu8(T01, T11);
+            T22 = _mm_sad_pu8(T02, T12);
+            T23 = _mm_sad_pu8(T03, T13);
+
+            sum1 = _mm_add_pi16(sum1, T20);
+            sum1 = _mm_add_pi16(sum1, T21);
+            sum1 = _mm_add_pi16(sum1, T22);
+            sum1 = _mm_add_pi16(sum1, T23);
+
+            T10 = (*(__m64*)(fref3 + (i + 0) * frefstride));
+            T11 = (*(__m64*)(fref3 + (i + 1) * frefstride));
+            T12 = (*(__m64*)(fref3 + (i + 2) * frefstride));
+            T13 = (*(__m64*)(fref3 + (i + 3) * frefstride));
+
+            T20 = _mm_sad_pu8(T00, T10);
+            T21 = _mm_sad_pu8(T01, T11);
+            T22 = _mm_sad_pu8(T02, T12);
+            T23 = _mm_sad_pu8(T03, T13);
+
+            sum2 = _mm_add_pi16(sum2, T20);
+            sum2 = _mm_add_pi16(sum2, T21);
+            sum2 = _mm_add_pi16(sum2, T22);
+            sum2 = _mm_add_pi16(sum2, T23);
+        }
+    }
+
+    res[0] = _m_to_int(sum0);
+    res[1] = _m_to_int(sum1);
+    res[2] = _m_to_int(sum2);
+}
+
+#else /* if HAVE_MMX */
+
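+/* SSE2 fallback when MMX is unavailable: two 8-byte rows are packed into one 128-bit register before PSADBW */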
+template<int ly>
+void sad_x3_8(pixel *fenc, pixel *fref1, pixel *fref2, pixel *fref3, intptr_t frefstride, int *res)
+{
+    assert((ly % 4) == 0);
+    __m128i sum0 = _mm_setzero_si128();
+
+    __m128i T00, T01, T02, T03;
+    __m128i T10, T11, T12, T13;
+    __m128i T20, T21;
+
+    if (ly == 4)
+    {
+        T00 = _mm_loadl_epi64((__m128i*)(fenc + (0) * FENC_STRIDE));
+        T01 = _mm_loadl_epi64((__m128i*)(fenc + (1) * FENC_STRIDE));
+        T01 = _mm_unpacklo_epi64(T00, T01);
+        T02 = _mm_loadl_epi64((__m128i*)(fenc + (2) * FENC_STRIDE));
+        T03 = _mm_loadl_epi64((__m128i*)(fenc + (3) * FENC_STRIDE));
+        T03 = _mm_unpacklo_epi64(T02, T03);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref1 + (0) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref1 + (1) * frefstride));
+        T11 = _mm_unpacklo_epi64(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref1 + (2) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref1 + (3) * frefstride));
+        T13 = _mm_unpacklo_epi64(T12, T13);
+
+        T20 = _mm_sad_epu8(T01, T11);
+        T21 = _mm_sad_epu8(T03, T13);
+        T21 = _mm_add_epi32(T20, T21);
+        sum0 = _mm_shuffle_epi32(T21, 2);
+        sum0 = _mm_add_epi32(sum0, T21);
+        res[0] = _mm_cvtsi128_si32(sum0);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref2 + (0) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref2 + (1) * frefstride));
+        T11 = _mm_unpacklo_epi64(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref2 + (2) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref2 + (3) * frefstride));
+        T13 = _mm_unpacklo_epi64(T12, T13);
+
+        T20 = _mm_sad_epu8(T01, T11);
+        T21 = _mm_sad_epu8(T03, T13);
+        T21 = _mm_add_epi32(T20, T21);
+        sum0 = _mm_shuffle_epi32(T21, 2);
+        sum0 = _mm_add_epi32(sum0, T21);
+        res[1] = _mm_cvtsi128_si32(sum0);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref3 + (0) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref3 + (1) * frefstride));
+        T11 = _mm_unpacklo_epi64(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref3 + (2) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref3 + (3) * frefstride));
+        T13 = _mm_unpacklo_epi64(T12, T13);
+
+        T20 = _mm_sad_epu8(T01, T11);
+        T21 = _mm_sad_epu8(T03, T13);
+        T21 = _mm_add_epi32(T20, T21);
+        sum0 = _mm_shuffle_epi32(T21, 2);
+        sum0 = _mm_add_epi32(sum0, T21);
+        res[2] = _mm_cvtsi128_si32(sum0);
+    }
+    else if (ly == 8)
+    {
+        T00 = _mm_loadl_epi64((__m128i*)(fenc + (0) * FENC_STRIDE));
+        T01 = _mm_loadl_epi64((__m128i*)(fenc + (1) * FENC_STRIDE));
+        T01 = _mm_unpacklo_epi64(T00, T01);
+        T02 = _mm_loadl_epi64((__m128i*)(fenc + (2) * FENC_STRIDE));
+        T03 = _mm_loadl_epi64((__m128i*)(fenc + (3) * FENC_STRIDE));
+        T03 = _mm_unpacklo_epi64(T02, T03);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref1 + (0) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref1 + (1) * frefstride));
+        T11 = _mm_unpacklo_epi64(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref1 + (2) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref1 + (3) * frefstride));
+        T13 = _mm_unpacklo_epi64(T12, T13);
+
+        T20 = _mm_sad_epu8(T01, T11);
+        T21 = _mm_sad_epu8(T03, T13);
+        T21 = _mm_add_epi32(T20, T21);
+        sum0 = _mm_shuffle_epi32(T21, 2);
+        sum0 = _mm_add_epi32(sum0, T21);
+        res[0] = _mm_cvtsi128_si32(sum0);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref2 + (0) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref2 + (1) * frefstride));
+        T11 = _mm_unpacklo_epi64(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref2 + (2) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref2 + (3) * frefstride));
+        T13 = _mm_unpacklo_epi64(T12, T13);
+
+        T20 = _mm_sad_epu8(T01, T11);
+        T21 = _mm_sad_epu8(T03, T13);
+        T21 = _mm_add_epi32(T20, T21);
+        sum0 = _mm_shuffle_epi32(T21, 2);
+        sum0 = _mm_add_epi32(sum0, T21);
+        res[1] = _mm_cvtsi128_si32(sum0);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref3 + (0) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref3 + (1) * frefstride));
+        T11 = _mm_unpacklo_epi64(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref3 + (2) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref3 + (3) * frefstride));
+        T13 = _mm_unpacklo_epi64(T12, T13);
+
+        T20 = _mm_sad_epu8(T01, T11);
+        T21 = _mm_sad_epu8(T03, T13);
+        T21 = _mm_add_epi32(T20, T21);
+        sum0 = _mm_shuffle_epi32(T21, 2);
+        sum0 = _mm_add_epi32(sum0, T21);
+        res[2] = _mm_cvtsi128_si32(sum0);
+
+        T00 = _mm_loadl_epi64((__m128i*)(fenc + (4) * FENC_STRIDE));
+        T01 = _mm_loadl_epi64((__m128i*)(fenc + (5) * FENC_STRIDE));
+        T01 = _mm_unpacklo_epi64(T00, T01);
+        T02 = _mm_loadl_epi64((__m128i*)(fenc + (6) * FENC_STRIDE));
+        T03 = _mm_loadl_epi64((__m128i*)(fenc + (7) * FENC_STRIDE));
+        T03 = _mm_unpacklo_epi64(T02, T03);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref1 + (4) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref1 + (5) * frefstride));
+        T11 = _mm_unpacklo_epi64(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref1 + (6) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref1 + (7) * frefstride));
+        T13 = _mm_unpacklo_epi64(T12, T13);
+
+        T20 = _mm_sad_epu8(T01, T11);
+        T21 = _mm_sad_epu8(T03, T13);
+        T21 = _mm_add_epi32(T20, T21);
+        sum0 = _mm_shuffle_epi32(T21, 2);
+        sum0 = _mm_add_epi32(sum0, T21);
+        res[0] = res[0] + _mm_cvtsi128_si32(sum0);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref2 + (4) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref2 + (5) * frefstride));
+        T11 = _mm_unpacklo_epi64(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref2 + (6) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref2 + (7) * frefstride));
+        T13 = _mm_unpacklo_epi64(T12, T13);
+
+        T20 = _mm_sad_epu8(T01, T11);
+        T21 = _mm_sad_epu8(T03, T13);
+        T21 = _mm_add_epi32(T20, T21);
+        sum0 = _mm_shuffle_epi32(T21, 2);
+        sum0 = _mm_add_epi32(sum0, T21);
+        res[1] = res[1] + _mm_cvtsi128_si32(sum0);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref3 + (4) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref3 + (5) * frefstride));
+        T11 = _mm_unpacklo_epi64(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref3 + (6) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref3 + (7) * frefstride));
+        T13 = _mm_unpacklo_epi64(T12, T13);
+
+        T20 = _mm_sad_epu8(T01, T11);
+        T21 = _mm_sad_epu8(T03, T13);
+        T21 = _mm_add_epi32(T20, T21);
+        sum0 = _mm_shuffle_epi32(T21, 2);
+        sum0 = _mm_add_epi32(sum0, T21);
+        res[2] = res[2] + _mm_cvtsi128_si32(sum0);
+    }
+    else if (ly == 16)
+    {
+        T00 = _mm_loadl_epi64((__m128i*)(fenc + (0) * FENC_STRIDE));
+        T01 = _mm_loadl_epi64((__m128i*)(fenc + (1) * FENC_STRIDE));
+        T01 = _mm_unpacklo_epi64(T00, T01);
+        T02 = _mm_loadl_epi64((__m128i*)(fenc + (2) * FENC_STRIDE));
+        T03 = _mm_loadl_epi64((__m128i*)(fenc + (3) * FENC_STRIDE));
+        T03 = _mm_unpacklo_epi64(T02, T03);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref1 + (0) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref1 + (1) * frefstride));
+        T11 = _mm_unpacklo_epi64(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref1 + (2) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref1 + (3) * frefstride));
+        T13 = _mm_unpacklo_epi64(T12, T13);
+
+        T20 = _mm_sad_epu8(T01, T11);
+        T21 = _mm_sad_epu8(T03, T13);
+        T21 = _mm_add_epi32(T20, T21);
+        sum0 = _mm_shuffle_epi32(T21, 2);
+        sum0 = _mm_add_epi32(sum0, T21);
+        res[0] = _mm_cvtsi128_si32(sum0);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref2 + (0) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref2 + (1) * frefstride));
+        T11 = _mm_unpacklo_epi64(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref2 + (2) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref2 + (3) * frefstride));
+        T13 = _mm_unpacklo_epi64(T12, T13);
+
+        T20 = _mm_sad_epu8(T01, T11);
+        T21 = _mm_sad_epu8(T03, T13);
+        T21 = _mm_add_epi32(T20, T21);
+        sum0 = _mm_shuffle_epi32(T21, 2);
+        sum0 = _mm_add_epi32(sum0, T21);
+        res[1] = _mm_cvtsi128_si32(sum0);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref3 + (0) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref3 + (1) * frefstride));
+        T11 = _mm_unpacklo_epi64(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref3 + (2) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref3 + (3) * frefstride));
+        T13 = _mm_unpacklo_epi64(T12, T13);
+
+        T20 = _mm_sad_epu8(T01, T11);
+        T21 = _mm_sad_epu8(T03, T13);
+        T21 = _mm_add_epi32(T20, T21);
+        sum0 = _mm_shuffle_epi32(T21, 2);
+        sum0 = _mm_add_epi32(sum0, T21);
+        res[2] = _mm_cvtsi128_si32(sum0);
+
+        T00 = _mm_loadl_epi64((__m128i*)(fenc + (4) * FENC_STRIDE));
+        T01 = _mm_loadl_epi64((__m128i*)(fenc + (5) * FENC_STRIDE));
+        T01 = _mm_unpacklo_epi64(T00, T01);
+        T02 = _mm_loadl_epi64((__m128i*)(fenc + (6) * FENC_STRIDE));
+        T03 = _mm_loadl_epi64((__m128i*)(fenc + (7) * FENC_STRIDE));
+        T03 = _mm_unpacklo_epi64(T02, T03);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref1 + (4) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref1 + (5) * frefstride));
+        T11 = _mm_unpacklo_epi64(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref1 + (6) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref1 + (7) * frefstride));
+        T13 = _mm_unpacklo_epi64(T12, T13);
+
+        T20 = _mm_sad_epu8(T01, T11);
+        T21 = _mm_sad_epu8(T03, T13);
+        T21 = _mm_add_epi32(T20, T21);
+        sum0 = _mm_shuffle_epi32(T21, 2);
+        sum0 = _mm_add_epi32(sum0, T21);
+        res[0] = res[0] + _mm_cvtsi128_si32(sum0);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref2 + (4) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref2 + (5) * frefstride));
+        T11 = _mm_unpacklo_epi64(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref2 + (6) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref2 + (7) * frefstride));
+        T13 = _mm_unpacklo_epi64(T12, T13);
+
+        T20 = _mm_sad_epu8(T01, T11);
+        T21 = _mm_sad_epu8(T03, T13);
+        T21 = _mm_add_epi32(T20, T21);
+        sum0 = _mm_shuffle_epi32(T21, 2);
+        sum0 = _mm_add_epi32(sum0, T21);
+        res[1] = res[1] + _mm_cvtsi128_si32(sum0);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref3 + (4) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref3 + (5) * frefstride));
+        T11 = _mm_unpacklo_epi64(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref3 + (6) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref3 + (7) * frefstride));
+        T13 = _mm_unpacklo_epi64(T12, T13);
+
+        T20 = _mm_sad_epu8(T01, T11);
+        T21 = _mm_sad_epu8(T03, T13);
+        T21 = _mm_add_epi32(T20, T21);
+        sum0 = _mm_shuffle_epi32(T21, 2);
+        sum0 = _mm_add_epi32(sum0, T21);
+        res[2] = res[2] + _mm_cvtsi128_si32(sum0);
+
+        T00 = _mm_loadl_epi64((__m128i*)(fenc + (8) * FENC_STRIDE));
+        T01 = _mm_loadl_epi64((__m128i*)(fenc + (9) * FENC_STRIDE));
+        T01 = _mm_unpacklo_epi64(T00, T01);
+        T02 = _mm_loadl_epi64((__m128i*)(fenc + (10) * FENC_STRIDE));
+        T03 = _mm_loadl_epi64((__m128i*)(fenc + (11) * FENC_STRIDE));
+        T03 = _mm_unpacklo_epi64(T02, T03);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref1 + (8) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref1 + (9) * frefstride));
+        T11 = _mm_unpacklo_epi64(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref1 + (10) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref1 + (11) * frefstride));
+        T13 = _mm_unpacklo_epi64(T12, T13);
+
+        T20 = _mm_sad_epu8(T01, T11);
+        T21 = _mm_sad_epu8(T03, T13);
+        T21 = _mm_add_epi32(T20, T21);
+        sum0 = _mm_shuffle_epi32(T21, 2);
+        sum0 = _mm_add_epi32(sum0, T21);
+        res[0] = res[0] + _mm_cvtsi128_si32(sum0);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref2 + (8) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref2 + (9) * frefstride));
+        T11 = _mm_unpacklo_epi64(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref2 + (10) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref2 + (11) * frefstride));
+        T13 = _mm_unpacklo_epi64(T12, T13);
+
+        T20 = _mm_sad_epu8(T01, T11);
+        T21 = _mm_sad_epu8(T03, T13);
+        T21 = _mm_add_epi32(T20, T21);
+        sum0 = _mm_shuffle_epi32(T21, 2);
+        sum0 = _mm_add_epi32(sum0, T21);
+        res[1] = res[1] + _mm_cvtsi128_si32(sum0);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref3 + (8) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref3 + (9) * frefstride));
+        T11 = _mm_unpacklo_epi64(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref3 + (10) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref3 + (11) * frefstride));
+        T13 = _mm_unpacklo_epi64(T12, T13);
+
+        T20 = _mm_sad_epu8(T01, T11);
+        T21 = _mm_sad_epu8(T03, T13);
+        T21 = _mm_add_epi32(T20, T21);
+        sum0 = _mm_shuffle_epi32(T21, 2);
+        sum0 = _mm_add_epi32(sum0, T21);
+        res[2] = res[2] + _mm_cvtsi128_si32(sum0);
+
+        T00 = _mm_loadl_epi64((__m128i*)(fenc + (12) * FENC_STRIDE));
+        T01 = _mm_loadl_epi64((__m128i*)(fenc + (13) * FENC_STRIDE));
+        T01 = _mm_unpacklo_epi64(T00, T01);
+        T02 = _mm_loadl_epi64((__m128i*)(fenc + (14) * FENC_STRIDE));
+        T03 = _mm_loadl_epi64((__m128i*)(fenc + (15) * FENC_STRIDE));
+        T03 = _mm_unpacklo_epi64(T02, T03);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref1 + (12) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref1 + (13) * frefstride));
+        T11 = _mm_unpacklo_epi64(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref1 + (14) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref1 + (15) * frefstride));
+        T13 = _mm_unpacklo_epi64(T12, T13);
+
+        T20 = _mm_sad_epu8(T01, T11);
+        T21 = _mm_sad_epu8(T03, T13);
+        T21 = _mm_add_epi32(T20, T21);
+        sum0 = _mm_shuffle_epi32(T21, 2);
+        sum0 = _mm_add_epi32(sum0, T21);
+        res[0] = res[0] + _mm_cvtsi128_si32(sum0);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref2 + (12) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref2 + (13) * frefstride));
+        T11 = _mm_unpacklo_epi64(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref2 + (14) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref2 + (15) * frefstride));
+        T13 = _mm_unpacklo_epi64(T12, T13);
+
+        T20 = _mm_sad_epu8(T01, T11);
+        T21 = _mm_sad_epu8(T03, T13);
+        T21 = _mm_add_epi32(T20, T21);
+        sum0 = _mm_shuffle_epi32(T21, 2);
+        sum0 = _mm_add_epi32(sum0, T21);
+        res[1] = res[1] + _mm_cvtsi128_si32(sum0);
+
+        T10 = _mm_loadl_epi64((__m128i*)(fref3 + (12) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref3 + (13) * frefstride));
+        T11 = _mm_unpacklo_epi64(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref3 + (14) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref3 + (15) * frefstride));
+        T13 = _mm_unpacklo_epi64(T12, T13);
+
+        T20 = _mm_sad_epu8(T01, T11);
+        T21 = _mm_sad_epu8(T03, T13);
+        T21 = _mm_add_epi32(T20, T21);
+        sum0 = _mm_shuffle_epi32(T21, 2);
+        sum0 = _mm_add_epi32(sum0, T21);
+        res[2] = res[2] + _mm_cvtsi128_si32(sum0);
+    }
+    else if ((ly % 8) == 0)
+    {
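+        /* generic path: eight rows per iteration, accumulated directly into res[] */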
+        res[0] = res[1] = res[2] = 0;
+        for (int i = 0; i < ly; i += 8)
+        {
+            T00 = _mm_loadl_epi64((__m128i*)(fenc + (i + 0) * FENC_STRIDE));
+            T01 = _mm_loadl_epi64((__m128i*)(fenc + (i + 1) * FENC_STRIDE));
+            T01 = _mm_unpacklo_epi64(T00, T01);
+            T02 = _mm_loadl_epi64((__m128i*)(fenc + (i + 2) * FENC_STRIDE));
+            T03 = _mm_loadl_epi64((__m128i*)(fenc + (i + 3) * FENC_STRIDE));
+            T03 = _mm_unpacklo_epi64(T02, T03);
+
+            T10 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 0) * frefstride));
+            T11 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 1) * frefstride));
+            T11 = _mm_unpacklo_epi64(T10, T11);
+            T12 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 2) * frefstride));
+            T13 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 3) * frefstride));
+            T13 = _mm_unpacklo_epi64(T12, T13);
+
+            T20 = _mm_sad_epu8(T01, T11);
+            T21 = _mm_sad_epu8(T03, T13);
+            T21 = _mm_add_epi32(T20, T21);
+            sum0 = _mm_shuffle_epi32(T21, 2);
+            sum0 = _mm_add_epi32(sum0, T21);
+            res[0] = res[0] + _mm_cvtsi128_si32(sum0);
+
+            T10 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 0) * frefstride));
+            T11 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 1) * frefstride));
+            T11 = _mm_unpacklo_epi64(T10, T11);
+            T12 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 2) * frefstride));
+            T13 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 3) * frefstride));
+            T13 = _mm_unpacklo_epi64(T12, T13);
+
+            T20 = _mm_sad_epu8(T01, T11);
+            T21 = _mm_sad_epu8(T03, T13);
+            T21 = _mm_add_epi32(T20, T21);
+            sum0 = _mm_shuffle_epi32(T21, 2);
+            sum0 = _mm_add_epi32(sum0, T21);
+            res[1] = res[1] + _mm_cvtsi128_si32(sum0);
+
+            T10 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 0) * frefstride));
+            T11 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 1) * frefstride));
+            T11 = _mm_unpacklo_epi64(T10, T11);
+            T12 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 2) * frefstride));
+            T13 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 3) * frefstride));
+            T13 = _mm_unpacklo_epi64(T12, T13);
+
+            T20 = _mm_sad_epu8(T01, T11);
+            T21 = _mm_sad_epu8(T03, T13);
+            T21 = _mm_add_epi32(T20, T21);
+            sum0 = _mm_shuffle_epi32(T21, 2);
+            sum0 = _mm_add_epi32(sum0, T21);
+            res[2] = res[2] + _mm_cvtsi128_si32(sum0);
+
+            T00 = _mm_loadl_epi64((__m128i*)(fenc + (i + 4) * FENC_STRIDE));
+            T01 = _mm_loadl_epi64((__m128i*)(fenc + (i + 5) * FENC_STRIDE));
+            T01 = _mm_unpacklo_epi64(T00, T01);
+            T02 = _mm_loadl_epi64((__m128i*)(fenc + (i + 6) * FENC_STRIDE));
+            T03 = _mm_loadl_epi64((__m128i*)(fenc + (i + 7) * FENC_STRIDE));
+            T03 = _mm_unpacklo_epi64(T02, T03);
+
+            T10 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 4) * frefstride));
+            T11 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 5) * frefstride));
+            T11 = _mm_unpacklo_epi64(T10, T11);
+            T12 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 6) * frefstride));
+            T13 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 7) * frefstride));
+            T13 = _mm_unpacklo_epi64(T12, T13);
+
+            T20 = _mm_sad_epu8(T01, T11);
+            T21 = _mm_sad_epu8(T03, T13);
+            T21 = _mm_add_epi32(T20, T21);
+            sum0 = _mm_shuffle_epi32(T21, 2);
+            sum0 = _mm_add_epi32(sum0, T21);
+            res[0] = res[0] + _mm_cvtsi128_si32(sum0);
+
+            T10 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 4) * frefstride));
+            T11 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 5) * frefstride));
+            T11 = _mm_unpacklo_epi64(T10, T11);
+            T12 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 6) * frefstride));
+            T13 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 7) * frefstride));
+            T13 = _mm_unpacklo_epi64(T12, T13);
+
+            T20 = _mm_sad_epu8(T01, T11);
+            T21 = _mm_sad_epu8(T03, T13);
+            T21 = _mm_add_epi32(T20, T21);
+            sum0 = _mm_shuffle_epi32(T21, 2);
+            sum0 = _mm_add_epi32(sum0, T21);
+            res[1] = res[1] + _mm_cvtsi128_si32(sum0);
+
+            T10 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 4) * frefstride));
+            T11 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 5) * frefstride));
+            T11 = _mm_unpacklo_epi64(T10, T11);
+            T12 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 6) * frefstride));
+            T13 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 7) * frefstride));
+            T13 = _mm_unpacklo_epi64(T12, T13);
+
+            T20 = _mm_sad_epu8(T01, T11);
+            T21 = _mm_sad_epu8(T03, T13);
+            T21 = _mm_add_epi32(T20, T21);
+            sum0 = _mm_shuffle_epi32(T21, 2);
+            sum0 = _mm_add_epi32(sum0, T21);
+            res[2] = res[2] + _mm_cvtsi128_si32(sum0);
+        }
+    }
+    else
+    {
+        res[0] = res[1] = res[2] = 0;
+        for (int i = 0; i < ly; i += 4)
+        {
+            T00 = _mm_loadl_epi64((__m128i*)(fenc + (i + 0) * FENC_STRIDE));
+            T01 = _mm_loadl_epi64((__m128i*)(fenc + (i + 1) * FENC_STRIDE));
+            T01 = _mm_unpacklo_epi64(T00, T01);
+            T02 = _mm_loadl_epi64((__m128i*)(fenc + (i + 2) * FENC_STRIDE));
+            T03 = _mm_loadl_epi64((__m128i*)(fenc + (i + 3) * FENC_STRIDE));
+            T03 = _mm_unpacklo_epi64(T02, T03);
+
+            T10 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 0) * frefstride));
+            T11 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 1) * frefstride));
+            T11 = _mm_unpacklo_epi64(T10, T11);
+            T12 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 2) * frefstride));
+            T13 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 3) * frefstride));
+            T13 = _mm_unpacklo_epi64(T12, T13);
+
+            T20 = _mm_sad_epu8(T01, T11);
+            T21 = _mm_sad_epu8(T03, T13);
+            T21 = _mm_add_epi32(T20, T21);
+            sum0 = _mm_shuffle_epi32(T21, 2);
+            sum0 = _mm_add_epi32(sum0, T21);
+            res[0] = res[0] + _mm_cvtsi128_si32(sum0);
+
+            T10 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 0) * frefstride));
+            T11 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 1) * frefstride));
+            T11 = _mm_unpacklo_epi64(T10, T11);
+            T12 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 2) * frefstride));
+            T13 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 3) * frefstride));
+            T13 = _mm_unpacklo_epi64(T12, T13);
+
+            T20 = _mm_sad_epu8(T01, T11);
+            T21 = _mm_sad_epu8(T03, T13);
+            T21 = _mm_add_epi32(T20, T21);
+            sum0 = _mm_shuffle_epi32(T21, 2);
+            sum0 = _mm_add_epi32(sum0, T21);
+            res[1] = res[1] + _mm_cvtsi128_si32(sum0);
+
+            T10 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 0) * frefstride));
+            T11 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 1) * frefstride));
+            T11 = _mm_unpacklo_epi64(T10, T11);
+            T12 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 2) * frefstride));
+            T13 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 3) * frefstride));
+            T13 = _mm_unpacklo_epi64(T12, T13);
+
+            T20 = _mm_sad_epu8(T01, T11);
+            T21 = _mm_sad_epu8(T03, T13);
+            T21 = _mm_add_epi32(T20, T21);
+            sum0 = _mm_shuffle_epi32(T21, 2);
+            sum0 = _mm_add_epi32(sum0, T21);
+            res[2] = res[2] + _mm_cvtsi128_si32(sum0);
+        }
+    }
+}
+
+#endif /* if HAVE_MMX */
+#endif /* if INSTRSET >= X265_CPU_LEVEL_SSE41 */
+
+/* For performance, this function assumes that the last load can safely access 16 elements, i.e. 4 bytes beyond the 12-pixel block width. */
+template<int ly>
+void sad_x3_12(pixel *fenc, pixel *fref1, pixel *fref2, pixel *fref3, intptr_t frefstride, int *res)
+{
+    Vec16uc m1, n1, n2, n3;
+
+    Vec4i sum1(0), sum2(0), sum3(0);
+    Vec8us sad1(0), sad2(0), sad3(0);
+    int max_iterators = (ly >> 4) << 4; /* ly rounded down to a multiple of 16 */
+    int row;
+
+    for (row = 0; row < max_iterators; row += 16)
+    {
+        for (int i = 0; i < 16; i++)
+        {
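+            /* cutoff(12) zeroes the lanes past the 12-pixel block width before the SAD */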
+            m1.load_a(fenc);
+            m1.cutoff(12);
+            n1.load(fref1);
+            n1.cutoff(12);
+            n2.load(fref2);
+            n2.cutoff(12);
+            n3.load(fref3);
+            n3.cutoff(12);
+
+            sad1.addSumAbsDiff(m1, n1);
+            sad2.addSumAbsDiff(m1, n2);
+            sad3.addSumAbsDiff(m1, n3);
+
+            fenc += FENC_STRIDE;
+            fref1 += frefstride;
+            fref2 += frefstride;
+            fref3 += frefstride;
+        }
+
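+        /* widen the 16-bit row sums to 32 bits and clear them every 16 rows so they cannot overflow */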
+        sum1 += extend_low(sad1) + extend_high(sad1);
+        sum2 += extend_low(sad2) + extend_high(sad2);
+        sum3 += extend_low(sad3) + extend_high(sad3);
+        sad1 = 0;
+        sad2 = 0;
+        sad3 = 0;
+    }
+
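+    /* handle the rows left over when ly is not a multiple of 16 */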
+    while (row++ < ly)
+    {
+        m1.load_a(fenc);
+        m1.cutoff(12);
+        n1.load(fref1);
+        n1.cutoff(12);
+        n2.load(fref2);
+        n2.cutoff(12);
+        n3.load(fref3);
+        n3.cutoff(12);
+
+        sad1.addSumAbsDiff(m1, n1);
+        sad2.addSumAbsDiff(m1, n2);
+        sad3.addSumAbsDiff(m1, n3);
+
+        fenc += FENC_STRIDE;
+        fref1 += frefstride;
+        fref2 += frefstride;
+        fref3 += frefstride;
+    }
+
+    sum1 += extend_low(sad1) + extend_high(sad1);
+    sum2 += extend_low(sad2) + extend_high(sad2);
+    sum3 += extend_low(sad3) + extend_high(sad3);
+
+    res[0] = horizontal_add(sum1);
+    res[1] = horizontal_add(sum2);
+    res[2] = horizontal_add(sum3);
+}
+
+#if INSTRSET >= X265_CPU_LEVEL_SSE41
+template<int ly>
+void sad_x3_16(pixel *fenc, pixel *fref1, pixel *fref2, pixel *fref3, intptr_t frefstride, int *res)
+{
+    assert((ly % 4) == 0);
+
+    __m128i sum0, sum1;
+
+    __m128i T00, T01, T02, T03;
+    __m128i T10, T11, T12, T13;
+    __m128i T20, T21, T22, T23;
+
+    if (ly == 4)
+    {
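+        /* fenc rows are read with aligned loads; the reference rows with unaligned loads */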
+        T00 = _mm_load_si128((__m128i*)(fenc + (0) * FENC_STRIDE));
+        T01 = _mm_load_si128((__m128i*)(fenc + (1) * FENC_STRIDE));
+        T02 = _mm_load_si128((__m128i*)(fenc + (2) * FENC_STRIDE));
+        T03 = _mm_load_si128((__m128i*)(fenc + (3) * FENC_STRIDE));
+
+        T10 = _mm_loadu_si128((__m128i*)(fref1 + (0) * frefstride));
+        T11 = _mm_loadu_si128((__m128i*)(fref1 + (1) * frefstride));
+        T12 = _mm_loadu_si128((__m128i*)(fref1 + (2) * frefstride));
+        T13 = _mm_loadu_si128((__m128i*)(fref1 + (3) * frefstride));
+
+        T20 = _mm_sad_epu8(T00, T10);
+        T21 = _mm_sad_epu8(T01, T11);
+        T22 = _mm_sad_epu8(T02, T12);
+        T23 = _mm_sad_epu8(T03, T13);
+
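+        /* each PSADBW result is a 16-bit sum per 64-bit lane, so these 16-bit adds cannot overflow */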
+        T20 = _mm_add_epi16(T20, T21);
+        T22 = _mm_add_epi16(T22, T23);
+        sum0 = _mm_add_epi16(T20, T22);
+
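+        /* fold the upper 64-bit half onto the lower half and extract the scalar SAD */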
+        sum1 = _mm_shuffle_epi32(sum0, 2);
+        sum0 = _mm_add_epi32(sum0, sum1);
+        res[0] = _mm_cvtsi128_si32(sum0);
+
+        T10 = _mm_loadu_si128((__m128i*)(fref2 + (0) * frefstride));
+        T11 = _mm_loadu_si128((__m128i*)(fref2 + (1) * frefstride));
+        T12 = _mm_loadu_si128((__m128i*)(fref2 + (2) * frefstride));
+        T13 = _mm_loadu_si128((__m128i*)(fref2 + (3) * frefstride));
+
+        T20 = _mm_sad_epu8(T00, T10);
+        T21 = _mm_sad_epu8(T01, T11);
+        T22 = _mm_sad_epu8(T02, T12);
+        T23 = _mm_sad_epu8(T03, T13);
+
+        T20 = _mm_add_epi16(T20, T21);
+        T22 = _mm_add_epi16(T22, T23);
+        sum0 = _mm_add_epi16(T20, T22);
+
+        sum1 = _mm_shuffle_epi32(sum0, 2);
+        sum0 = _mm_add_epi32(sum0, sum1);
+        res[1] = _mm_cvtsi128_si32(sum0);
+
+        T10 = _mm_loadu_si128((__m128i*)(fref3 + (0) * frefstride));
+        T11 = _mm_loadu_si128((__m128i*)(fref3 + (1) * frefstride));
+        T12 = _mm_loadu_si128((__m128i*)(fref3 + (2) * frefstride));
+        T13 = _mm_loadu_si128((__m128i*)(fref3 + (3) * frefstride));
+
+        T20 = _mm_sad_epu8(T00, T10);
+        T21 = _mm_sad_epu8(T01, T11);
+        T22 = _mm_sad_epu8(T02, T12);
+        T23 = _mm_sad_epu8(T03, T13);
+
+        T20 = _mm_add_epi16(T20, T21);
+        T22 = _mm_add_epi16(T22, T23);
+        sum0 = _mm_add_epi16(T20, T22);
+
+        sum1 = _mm_shuffle_epi32(sum0, 2);
+        sum0 = _mm_add_epi32(sum0, sum1);
+        res[2] = _mm_cvtsi128_si32(sum0);
+    }
+    else if (ly == 8)
+    {
+        T00 = _mm_load_si128((__m128i*)(fenc + (0) * FENC_STRIDE));
+        T01 = _mm_load_si128((__m128i*)(fenc + (1) * FENC_STRIDE));
+        T02 = _mm_load_si128((__m128i*)(fenc + (2) * FENC_STRIDE));
+        T03 = _mm_load_si128((__m128i*)(fenc + (3) * FENC_STRIDE));
+
+        T10 = _mm_loadu_si128((__m128i*)(fref1 + (0) * frefstride));
+        T11 = _mm_loadu_si128((__m128i*)(fref1 + (1) * frefstride));
+        T12 = _mm_loadu_si128((__m128i*)(fref1 + (2) * frefstride));
+        T13 = _mm_loadu_si128((__m128i*)(fref1 + (3) * frefstride));
+
+        T20 = _mm_sad_epu8(T00, T10);
+        T21 = _mm_sad_epu8(T01, T11);
+        T22 = _mm_sad_epu8(T02, T12);
+        T23 = _mm_sad_epu8(T03, T13);
+
+        T20 = _mm_add_epi16(T20, T21);
+        T22 = _mm_add_epi16(T22, T23);
+        sum0 = _mm_add_epi16(T20, T22);
+
+        sum1 = _mm_shuffle_epi32(sum0, 2);
+        sum0 = _mm_add_epi32(sum0, sum1);
+        res[0] = _mm_cvtsi128_si32(sum0);
+
+        T10 = _mm_loadu_si128((__m128i*)(fref2 + (0) * frefstride));
+        T11 = _mm_loadu_si128((__m128i*)(fref2 + (1) * frefstride));
+        T12 = _mm_loadu_si128((__m128i*)(fref2 + (2) * frefstride));
+        T13 = _mm_loadu_si128((__m128i*)(fref2 + (3) * frefstride));
+
+        T20 = _mm_sad_epu8(T00, T10);
+        T21 = _mm_sad_epu8(T01, T11);
+        T22 = _mm_sad_epu8(T02, T12);
+        T23 = _mm_sad_epu8(T03, T13);
+
+        T20 = _mm_add_epi16(T20, T21);
+        T22 = _mm_add_epi16(T22, T23);
+        sum0 = _mm_add_epi16(T20, T22);
+
+        sum1 = _mm_shuffle_epi32(sum0, 2);
+        sum0 = _mm_add_epi32(sum0, sum1);
+        res[1] = _mm_cvtsi128_si32(sum0);
+
+        T10 = _mm_loadu_si128((__m128i*)(fref3 + (0) * frefstride));
+        T11 = _mm_loadu_si128((__m128i*)(fref3 + (1) * frefstride));
+        T12 = _mm_loadu_si128((__m128i*)(fref3 + (2) * frefstride));
+        T13 = _mm_loadu_si128((__m128i*)(fref3 + (3) * frefstride));
+
+        T20 = _mm_sad_epu8(T00, T10);
+        T21 = _mm_sad_epu8(T01, T11);
+        T22 = _mm_sad_epu8(T02, T12);
+        T23 = _mm_sad_epu8(T03, T13);
+
+        T20 = _mm_add_epi16(T20, T21);
+        T22 = _mm_add_epi16(T22, T23);
+        sum0 = _mm_add_epi16(T20, T22);
+
+        sum1 = _mm_shuffle_epi32(sum0, 2);
+        sum0 = _mm_add_epi32(sum0, sum1);
+        res[2] = _mm_cvtsi128_si32(sum0);
+
+        T00 = _mm_load_si128((__m128i*)(fenc + (4) * FENC_STRIDE));
+        T01 = _mm_load_si128((__m128i*)(fenc + (5) * FENC_STRIDE));
+        T02 = _mm_load_si128((__m128i*)(fenc + (6) * FENC_STRIDE));
+        T03 = _mm_load_si128((__m128i*)(fenc + (7) * FENC_STRIDE));
+
+        T10 = _mm_loadu_si128((__m128i*)(fref1 + (4) * frefstride));
+        T11 = _mm_loadu_si128((__m128i*)(fref1 + (5) * frefstride));
+        T12 = _mm_loadu_si128((__m128i*)(fref1 + (6) * frefstride));
+        T13 = _mm_loadu_si128((__m128i*)(fref1 + (7) * frefstride));
+
+        T20 = _mm_sad_epu8(T00, T10);
+        T21 = _mm_sad_epu8(T01, T11);
+        T22 = _mm_sad_epu8(T02, T12);
+        T23 = _mm_sad_epu8(T03, T13);
+
+        T20 = _mm_add_epi16(T20, T21);
+        T22 = _mm_add_epi16(T22, T23);
+        sum0 = _mm_add_epi16(T20, T22);
+
+        sum1 = _mm_shuffle_epi32(sum0, 2);
+        sum0 = _mm_add_epi32(sum0, sum1);
+        res[0] = res[0] + _mm_cvtsi128_si32(sum0);
+
+        T10 = _mm_loadu_si128((__m128i*)(fref2 + (4) * frefstride));
+        T11 = _mm_loadu_si128((__m128i*)(fref2 + (5) * frefstride));
+        T12 = _mm_loadu_si128((__m128i*)(fref2 + (6) * frefstride));
+        T13 = _mm_loadu_si128((__m128i*)(fref2 + (7) * frefstride));
+
+        T20 = _mm_sad_epu8(T00, T10);
+        T21 = _mm_sad_epu8(T01, T11);
+        T22 = _mm_sad_epu8(T02, T12);
+        T23 = _mm_sad_epu8(T03, T13);
+
+        T20 = _mm_add_epi16(T20, T21);
+        T22 = _mm_add_epi16(T22, T23);
+        sum0 = _mm_add_epi16(T20, T22);
+
+        sum1 = _mm_shuffle_epi32(sum0, 2);
+        sum0 = _mm_add_epi32(sum0, sum1);
+        res[1] = res[1] + _mm_cvtsi128_si32(sum0);
+
+        T10 = _mm_loadu_si128((__m128i*)(fref3 + (4) * frefstride));
+        T11 = _mm_loadu_si128((__m128i*)(fref3 + (5) * frefstride));
+        T12 = _mm_loadu_si128((__m128i*)(fref3 + (6) * frefstride));
+        T13 = _mm_loadu_si128((__m128i*)(fref3 + (7) * frefstride));
+
+        T20 = _mm_sad_epu8(T00, T10);
+        T21 = _mm_sad_epu8(T01, T11);
+        T22 = _mm_sad_epu8(T02, T12);
+        T23 = _mm_sad_epu8(T03, T13);
+
+        T20 = _mm_add_epi16(T20, T21);
+        T22 = _mm_add_epi16(T22, T23);
+        sum0 = _mm_add_epi16(T20, T22);
+
+        sum1 = _mm_shuffle_epi32(sum0, 2);
+        sum0 = _mm_add_epi32(sum0, sum1);
+        res[2] = res[2] + _mm_cvtsi128_si32(sum0);
+    }
+    else if (ly == 16)
+    {
+        T00 = _mm_load_si128((__m128i*)(fenc + (0) * FENC_STRIDE));
+        T01 = _mm_load_si128((__m128i*)(fenc + (1) * FENC_STRIDE));
+        T02 = _mm_load_si128((__m128i*)(fenc + (2) * FENC_STRIDE));
+        T03 = _mm_load_si128((__m128i*)(fenc + (3) * FENC_STRIDE));
+
+        T10 = _mm_loadu_si128((__m128i*)(fref1 + (0) * frefstride));
+        T11 = _mm_loadu_si128((__m128i*)(fref1 + (1) * frefstride));
+        T12 = _mm_loadu_si128((__m128i*)(fref1 + (2) * frefstride));
+        T13 = _mm_loadu_si128((__m128i*)(fref1 + (3) * frefstride));
+
+        T20 = _mm_sad_epu8(T00, T10);
+        T21 = _mm_sad_epu8(T01, T11);
+        T22 = _mm_sad_epu8(T02, T12);
+        T23 = _mm_sad_epu8(T03, T13);
+
+        T20 = _mm_add_epi16(T20, T21);
+        T22 = _mm_add_epi16(T22, T23);
+        sum0 = _mm_add_epi16(T20, T22);
+
+        sum1 = _mm_shuffle_epi32(sum0, 2);
+        sum0 = _mm_add_epi32(sum0, sum1);
+        res[0] = _mm_cvtsi128_si32(sum0);
+
+        T10 = _mm_loadu_si128((__m128i*)(fref2 + (0) * frefstride));
+        T11 = _mm_loadu_si128((__m128i*)(fref2 + (1) * frefstride));
+        T12 = _mm_loadu_si128((__m128i*)(fref2 + (2) * frefstride));
+        T13 = _mm_loadu_si128((__m128i*)(fref2 + (3) * frefstride));
+
+        T20 = _mm_sad_epu8(T00, T10);
+        T21 = _mm_sad_epu8(T01, T11);
+        T22 = _mm_sad_epu8(T02, T12);
+        T23 = _mm_sad_epu8(T03, T13);
+
+        T20 = _mm_add_epi16(T20, T21);
+        T22 = _mm_add_epi16(T22, T23);
+        sum0 = _mm_add_epi16(T20, T22);
+
+        sum1 = _mm_shuffle_epi32(sum0, 2);
+        sum0 = _mm_add_epi32(sum0, sum1);
+        res[1] = _mm_cvtsi128_si32(sum0);
+
+        T10 = _mm_loadu_si128((__m128i*)(fref3 + (0) * frefstride));
+        T11 = _mm_loadu_si128((__m128i*)(fref3 + (1) * frefstride));
+        T12 = _mm_loadu_si128((__m128i*)(fref3 + (2) * frefstride));
+        T13 = _mm_loadu_si128((__m128i*)(fref3 + (3) * frefstride));
+
+        T20 = _mm_sad_epu8(T00, T10);
+        T21 = _mm_sad_epu8(T01, T11);
+        T22 = _mm_sad_epu8(T02, T12);
+        T23 = _mm_sad_epu8(T03, T13);
+
+        T20 = _mm_add_epi16(T20, T21);
+        T22 = _mm_add_epi16(T22, T23);
+        sum0 = _mm_add_epi16(T20, T22);
+
+        sum1 = _mm_shuffle_epi32(sum0, 2);
+        sum0 = _mm_add_epi32(sum0, sum1);
+        res[2] = _mm_cvtsi128_si32(sum0);
+
+        T00 = _mm_load_si128((__m128i*)(fenc + (4) * FENC_STRIDE));
+        T01 = _mm_load_si128((__m128i*)(fenc + (5) * FENC_STRIDE));
+        T02 = _mm_load_si128((__m128i*)(fenc + (6) * FENC_STRIDE));
+        T03 = _mm_load_si128((__m128i*)(fenc + (7) * FENC_STRIDE));
+
+        T10 = _mm_loadu_si128((__m128i*)(fref1 + (4) * frefstride));
+        T11 = _mm_loadu_si128((__m128i*)(fref1 + (5) * frefstride));
+        T12 = _mm_loadu_si128((__m128i*)(fref1 + (6) * frefstride));
+        T13 = _mm_loadu_si128((__m128i*)(fref1 + (7) * frefstride));
+
+        T20 = _mm_sad_epu8(T00, T10);
+        T21 = _mm_sad_epu8(T01, T11);
+        T22 = _mm_sad_epu8(T02, T12);
+        T23 = _mm_sad_epu8(T03, T13);
+
+        T20 = _mm_add_epi16(T20, T21);
+        T22 = _mm_add_epi16(T22, T23);
+        sum0 = _mm_add_epi16(T20, T22);
+
+        sum1 = _mm_shuffle_epi32(sum0, 2);
+        sum0 = _mm_add_epi32(sum0, sum1);
+        res[0] = res[0] + _mm_cvtsi128_si32(sum0);
+
+        T10 = _mm_loadu_si128((__m128i*)(fref2 + (4) * frefstride));
+        T11 = _mm_loadu_si128((__m128i*)(fref2 + (5) * frefstride));
+        T12 = _mm_loadu_si128((__m128i*)(fref2 + (6) * frefstride));
+        T13 = _mm_loadu_si128((__m128i*)(fref2 + (7) * frefstride));
+
+        T20 = _mm_sad_epu8(T00, T10);
+        T21 = _mm_sad_epu8(T01, T11);
+        T22 = _mm_sad_epu8(T02, T12);
+        T23 = _mm_sad_epu8(T03, T13);
+
+        T20 = _mm_add_epi16(T20, T21);
+        T22 = _mm_add_epi16(T22, T23);
+        sum0 = _mm_add_epi16(T20, T22);
+
+        sum1 = _mm_shuffle_epi32(sum0, 2);
+        sum0 = _mm_add_epi32(sum0, sum1);
+        res[1] = res[1] + _mm_cvtsi128_si32(sum0);
+
+        T10 = _mm_loadu_si128((__m128i*)(fref3 + (4) * frefstride));
+        T11 = _mm_loadu_si128((__m128i*)(fref3 + (5) * frefstride));
+        T12 = _mm_loadu_si128((__m128i*)(fref3 + (6) * frefstride));
+        T13 = _mm_loadu_si128((__m128i*)(fref3 + (7) * frefstride));
+
+        T20 = _mm_sad_epu8(T00, T10);
+        T21 = _mm_sad_epu8(T01, T11);
+        T22 = _mm_sad_epu8(T02, T12);
+        T23 = _mm_sad_epu8(T03, T13);
+
+        T20 = _mm_add_epi16(T20, T21);
+        T22 = _mm_add_epi16(T22, T23);
+        sum0 = _mm_add_epi16(T20, T22);
+
+        sum1 = _mm_shuffle_epi32(sum0, 2);
+        sum0 = _mm_add_epi32(sum0, sum1);
+        res[2] = res[2] + _mm_cvtsi128_si32(sum0);
+
+        T00 = _mm_load_si128((__m128i*)(fenc + (8) * FENC_STRIDE));
+        T01 = _mm_load_si128((__m128i*)(fenc + (9) * FENC_STRIDE));
+        T02 = _mm_load_si128((__m128i*)(fenc + (10) * FENC_STRIDE));
+        T03 = _mm_load_si128((__m128i*)(fenc + (11) * FENC_STRIDE));
+
+        T10 = _mm_loadu_si128((__m128i*)(fref1 + (8) * frefstride));
+        T11 = _mm_loadu_si128((__m128i*)(fref1 + (9) * frefstride));
+        T12 = _mm_loadu_si128((__m128i*)(fref1 + (10) * frefstride));
+        T13 = _mm_loadu_si128((__m128i*)(fref1 + (11) * frefstride));
+
+        T20 = _mm_sad_epu8(T00, T10);
+        T21 = _mm_sad_epu8(T01, T11);
+        T22 = _mm_sad_epu8(T02, T12);
+        T23 = _mm_sad_epu8(T03, T13);
+
+        T20 = _mm_add_epi16(T20, T21);
+        T22 = _mm_add_epi16(T22, T23);
+        sum0 = _mm_add_epi16(T20, T22);
+
+        sum1 = _mm_shuffle_epi32(sum0, 2);
+        sum0 = _mm_add_epi32(sum0, sum1);
+        res[0] = res[0] + _mm_cvtsi128_si32(sum0);
+
+        T10 = _mm_loadu_si128((__m128i*)(fref2 + (8) * frefstride));
+        T11 = _mm_loadu_si128((__m128i*)(fref2 + (9) * frefstride));
+        T12 = _mm_loadu_si128((__m128i*)(fref2 + (10) * frefstride));
+        T13 = _mm_loadu_si128((__m128i*)(fref2 + (11) * frefstride));
+
+        T20 = _mm_sad_epu8(T00, T10);
+        T21 = _mm_sad_epu8(T01, T11);
+        T22 = _mm_sad_epu8(T02, T12);
+        T23 = _mm_sad_epu8(T03, T13);
+
+        T20 = _mm_add_epi16(T20, T21);
+        T22 = _mm_add_epi16(T22, T23);
+        sum0 = _mm_add_epi16(T20, T22);
+
+        sum1 = _mm_shuffle_epi32(sum0, 2);
+        sum0 = _mm_add_epi32(sum0, sum1);
+        res[1] = res[1] + _mm_cvtsi128_si32(sum0);
+
+        T10 = _mm_loadu_si128((__m128i*)(fref3 + (8) * frefstride));
+        T11 = _mm_loadu_si128((__m128i*)(fref3 + (9) * frefstride));
+        T12 = _mm_loadu_si128((__m128i*)(fref3 + (10) * frefstride));
+        T13 = _mm_loadu_si128((__m128i*)(fref3 + (11) * frefstride));
+
+        T20 = _mm_sad_epu8(T00, T10);
+        T21 = _mm_sad_epu8(T01, T11);
+        T22 = _mm_sad_epu8(T02, T12);
+        T23 = _mm_sad_epu8(T03, T13);
+
+        T20 = _mm_add_epi16(T20, T21);
+        T22 = _mm_add_epi16(T22, T23);
+        sum0 = _mm_add_epi16(T20, T22);
+
+        sum1 = _mm_shuffle_epi32(sum0, 2);
+        sum0 = _mm_add_epi32(sum0, sum1);
+        res[2] = res[2] + _mm_cvtsi128_si32(sum0);
+
+        T00 = _mm_load_si128((__m128i*)(fenc + (12) * FENC_STRIDE));
+        T01 = _mm_load_si128((__m128i*)(fenc + (13) * FENC_STRIDE));
+        T02 = _mm_load_si128((__m128i*)(fenc + (14) * FENC_STRIDE));
+        T03 = _mm_load_si128((__m128i*)(fenc + (15) * FENC_STRIDE));
+
+        T10 = _mm_loadu_si128((__m128i*)(fref1 + (12) * frefstride));
+        T11 = _mm_loadu_si128((__m128i*)(fref1 + (13) * frefstride));
+        T12 = _mm_loadu_si128((__m128i*)(fref1 + (14) * frefstride));
+        T13 = _mm_loadu_si128((__m128i*)(fref1 + (15) * frefstride));
+
+        T20 = _mm_sad_epu8(T00, T10);
+        T21 = _mm_sad_epu8(T01, T11);
+        T22 = _mm_sad_epu8(T02, T12);
+        T23 = _mm_sad_epu8(T03, T13);
+
+        T20 = _mm_add_epi16(T20, T21);
+        T22 = _mm_add_epi16(T22, T23);
+        sum0 = _mm_add_epi16(T20, T22);
+
+        sum1 = _mm_shuffle_epi32(sum0, 2);
+        sum0 = _mm_add_epi32(sum0, sum1);
+        res[0] = res[0] + _mm_cvtsi128_si32(sum0);
+
+        T10 = _mm_loadu_si128((__m128i*)(fref2 + (12) * frefstride));
+        T11 = _mm_loadu_si128((__m128i*)(fref2 + (13) * frefstride));
+        T12 = _mm_loadu_si128((__m128i*)(fref2 + (14) * frefstride));
+        T13 = _mm_loadu_si128((__m128i*)(fref2 + (15) * frefstride));
+
+        T20 = _mm_sad_epu8(T00, T10);
+        T21 = _mm_sad_epu8(T01, T11);
+        T22 = _mm_sad_epu8(T02, T12);
+        T23 = _mm_sad_epu8(T03, T13);
+
+        T20 = _mm_add_epi16(T20, T21);
+        T22 = _mm_add_epi16(T22, T23);
+        sum0 = _mm_add_epi16(T20, T22);
+
+        sum1 = _mm_shuffle_epi32(sum0, 2);
+        sum0 = _mm_add_epi32(sum0, sum1);
+        res[1] = res[1] + _mm_cvtsi128_si32(sum0);
+
+        T10 = _mm_loadu_si128((__m128i*)(fref3 + (12) * frefstride));
+        T11 = _mm_loadu_si128((__m128i*)(fref3 + (13) * frefstride));
+        T12 = _mm_loadu_si128((__m128i*)(fref3 + (14) * frefstride));
+        T13 = _mm_loadu_si128((__m128i*)(fref3 + (15) * frefstride));
+
+        T20 = _mm_sad_epu8(T00, T10);
+        T21 = _mm_sad_epu8(T01, T11);
+        T22 = _mm_sad_epu8(T02, T12);
+        T23 = _mm_sad_epu8(T03, T13);
+
+        T20 = _mm_add_epi16(T20, T21);
+        T22 = _mm_add_epi16(T22, T23);
+        sum0 = _mm_add_epi16(T20, T22);
+
+        sum1 = _mm_shuffle_epi32(sum0, 2);
+        sum0 = _mm_add_epi32(sum0, sum1);
+        res[2] = res[2] + _mm_cvtsi128_si32(sum0);
+    }
+    else if ((ly % 8) == 0)
+    {
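+        /* generic path: two four-row groups per iteration, accumulated into res[] */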
+        res[0] = res[1] = res[2] = 0;
+        for (int i = 0; i < ly; i += 8)
+        {
+            T00 = _mm_load_si128((__m128i*)(fenc + (i + 0) * FENC_STRIDE));
+            T01 = _mm_load_si128((__m128i*)(fenc + (i + 1) * FENC_STRIDE));
+            T02 = _mm_load_si128((__m128i*)(fenc + (i + 2) * FENC_STRIDE));
+            T03 = _mm_load_si128((__m128i*)(fenc + (i + 3) * FENC_STRIDE));
+
+            T10 = _mm_loadu_si128((__m128i*)(fref1 + (i + 0) * frefstride));
+            T11 = _mm_loadu_si128((__m128i*)(fref1 + (i + 1) * frefstride));
+            T12 = _mm_loadu_si128((__m128i*)(fref1 + (i + 2) * frefstride));
+            T13 = _mm_loadu_si128((__m128i*)(fref1 + (i + 3) * frefstride));
+
+            T20 = _mm_sad_epu8(T00, T10);
+            T21 = _mm_sad_epu8(T01, T11);
+            T22 = _mm_sad_epu8(T02, T12);
+            T23 = _mm_sad_epu8(T03, T13);
+
+            T20 = _mm_add_epi16(T20, T21);
+            T22 = _mm_add_epi16(T22, T23);
+            sum0 = _mm_add_epi16(T20, T22);
+
+            sum1 = _mm_shuffle_epi32(sum0, 2);
+            sum0 = _mm_add_epi32(sum0, sum1);
+            res[0] = res[0] + _mm_cvtsi128_si32(sum0);
+
+            T10 = _mm_loadu_si128((__m128i*)(fref2 + (i + 0) * frefstride));
+            T11 = _mm_loadu_si128((__m128i*)(fref2 + (i + 1) * frefstride));
+            T12 = _mm_loadu_si128((__m128i*)(fref2 + (i + 2) * frefstride));
+            T13 = _mm_loadu_si128((__m128i*)(fref2 + (i + 3) * frefstride));
+
+            T20 = _mm_sad_epu8(T00, T10);
+            T21 = _mm_sad_epu8(T01, T11);
+            T22 = _mm_sad_epu8(T02, T12);
+            T23 = _mm_sad_epu8(T03, T13);
+
+            T20 = _mm_add_epi16(T20, T21);
+            T22 = _mm_add_epi16(T22, T23);
+            sum0 = _mm_add_epi16(T20, T22);
+
+            sum1 = _mm_shuffle_epi32(sum0, 2);
+            sum0 = _mm_add_epi32(sum0, sum1);
+            res[1] = res[1] + _mm_cvtsi128_si32(sum0);
+
+            T10 = _mm_loadu_si128((__m128i*)(fref3 + (i + 0) * frefstride));
+            T11 = _mm_loadu_si128((__m128i*)(fref3 + (i + 1) * frefstride));
+            T12 = _mm_loadu_si128((__m128i*)(fref3 + (i + 2) * frefstride));
+            T13 = _mm_loadu_si128((__m128i*)(fref3 + (i + 3) * frefstride));
+
+            T20 = _mm_sad_epu8(T00, T10);
+            T21 = _mm_sad_epu8(T01, T11);
+            T22 = _mm_sad_epu8(T02, T12);
+            T23 = _mm_sad_epu8(T03, T13);
+
+            T20 = _mm_add_epi16(T20, T21);
+            T22 = _mm_add_epi16(T22, T23);
+            sum0 = _mm_add_epi16(T20, T22);
+
+            sum1 = _mm_shuffle_epi32(sum0, 2);
+            sum0 = _mm_add_epi32(sum0, sum1);
+            res[2] = res[2] + _mm_cvtsi128_si32(sum0);
+
+            T00 = _mm_load_si128((__m128i*)(fenc + (i + 4) * FENC_STRIDE));
+            T01 = _mm_load_si128((__m128i*)(fenc + (i + 5) * FENC_STRIDE));
+            T02 = _mm_load_si128((__m128i*)(fenc + (i + 6) * FENC_STRIDE));
+            T03 = _mm_load_si128((__m128i*)(fenc + (i + 7) * FENC_STRIDE));
+
+            T10 = _mm_loadu_si128((__m128i*)(fref1 + (i + 4) * frefstride));
+            T11 = _mm_loadu_si128((__m128i*)(fref1 + (i + 5) * frefstride));
+            T12 = _mm_loadu_si128((__m128i*)(fref1 + (i + 6) * frefstride));
+            T13 = _mm_loadu_si128((__m128i*)(fref1 + (i + 7) * frefstride));
+
+            T20 = _mm_sad_epu8(T00, T10);
+            T21 = _mm_sad_epu8(T01, T11);
+            T22 = _mm_sad_epu8(T02, T12);
+            T23 = _mm_sad_epu8(T03, T13);
+
+            T20 = _mm_add_epi16(T20, T21);
+            T22 = _mm_add_epi16(T22, T23);
+            sum0 = _mm_add_epi16(T20, T22);
+
+            sum1 = _mm_shuffle_epi32(sum0, 2);
+            sum0 = _mm_add_epi32(sum0, sum1);
+            res[0] = res[0] + _mm_cvtsi128_si32(sum0);
+
+            T10 = _mm_loadu_si128((__m128i*)(fref2 + (i + 4) * frefstride));
+            T11 = _mm_loadu_si128((__m128i*)(fref2 + (i + 5) * frefstride));
+            T12 = _mm_loadu_si128((__m128i*)(fref2 + (i + 6) * frefstride));
+            T13 = _mm_loadu_si128((__m128i*)(fref2 + (i + 7) * frefstride));
+
+            T20 = _mm_sad_epu8(T00, T10);
+            T21 = _mm_sad_epu8(T01, T11);
+            T22 = _mm_sad_epu8(T02, T12);
+            T23 = _mm_sad_epu8(T03, T13);
+
+            T20 = _mm_add_epi16(T20, T21);
+            T22 = _mm_add_epi16(T22, T23);
+            sum0 = _mm_add_epi16(T20, T22);
+
+            sum1 = _mm_shuffle_epi32(sum0, 2);
+            sum0 = _mm_add_epi32(sum0, sum1);
+            res[1] = res[1] + _mm_cvtsi128_si32(sum0);
+
+            T10 = _mm_loadu_si128((__m128i*)(fref3 + (i + 4) * frefstride));
+            T11 = _mm_loadu_si128((__m128i*)(fref3 + (i + 5) * frefstride));
+            T12 = _mm_loadu_si128((__m128i*)(fref3 + (i + 6) * frefstride));
+            T13 = _mm_loadu_si128((__m128i*)(fref3 + (i + 7) * frefstride));
+
+            T20 = _mm_sad_epu8(T00, T10);
+            T21 = _mm_sad_epu8(T01, T11);
+            T22 = _mm_sad_epu8(T02, T12);
+            T23 = _mm_sad_epu8(T03, T13);
+
+            T20 = _mm_add_epi16(T20, T21);
+            T22 = _mm_add_epi16(T22, T23);
+            sum0 = _mm_add_epi16(T20, T22);
+
+            sum1 = _mm_shuffle_epi32(sum0, 2);
+            sum0 = _mm_add_epi32(sum0, sum1);
+            res[2] = res[2] + _mm_cvtsi128_si32(sum0);
+        }
+    }
+    else
+    {
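+        /* fallback: four rows per iteration for any ly that is a multiple of four */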
+        res[0] = res[1] = res[2] = 0;
+        for (int i = 0; i < ly; i += 4)
+        {
+            T00 = _mm_load_si128((__m128i*)(fenc + (i + 0) * FENC_STRIDE));
+            T01 = _mm_load_si128((__m128i*)(fenc + (i + 1) * FENC_STRIDE));
+            T02 = _mm_load_si128((__m128i*)(fenc + (i + 2) * FENC_STRIDE));
+            T03 = _mm_load_si128((__m128i*)(fenc + (i + 3) * FENC_STRIDE));
+
+            T10 = _mm_loadu_si128((__m128i*)(fref1 + (i + 0) * frefstride));
+            T11 = _mm_loadu_si128((__m128i*)(fref1 + (i + 1) * frefstride));
+            T12 = _mm_loadu_si128((__m128i*)(fref1 + (i + 2) * frefstride));
+            T13 = _mm_loadu_si128((__m128i*)(fref1 + (i + 3) * frefstride));
+
+            T20 = _mm_sad_epu8(T00, T10);
+            T21 = _mm_sad_epu8(T01, T11);
+            T22 = _mm_sad_epu8(T02, T12);
+            T23 = _mm_sad_epu8(T03, T13);
+
+            T20 = _mm_add_epi16(T20, T21);
+            T22 = _mm_add_epi16(T22, T23);
+            sum0 = _mm_add_epi16(T20, T22);
+
+            sum1 = _mm_shuffle_epi32(sum0, 2);
+            sum0 = _mm_add_epi32(sum0, sum1);
+            res[0] = res[0] + _mm_cvtsi128_si32(sum0);
+
+            T10 = _mm_loadu_si128((__m128i*)(fref2 + (i + 0) * frefstride));
+            T11 = _mm_loadu_si128((__m128i*)(fref2 + (i + 1) * frefstride));
+            T12 = _mm_loadu_si128((__m128i*)(fref2 + (i + 2) * frefstride));
+            T13 = _mm_loadu_si128((__m128i*)(fref2 + (i + 3) * frefstride));
+
+            T20 = _mm_sad_epu8(T00, T10);
+            T21 = _mm_sad_epu8(T01, T11);
+            T22 = _mm_sad_epu8(T02, T12);
+            T23 = _mm_sad_epu8(T03, T13);
+
+            T20 = _mm_add_epi16(T20, T21);
+            T22 = _mm_add_epi16(T22, T23);
+            sum0 = _mm_add_epi16(T20, T22);
+
+            sum1 = _mm_shuffle_epi32(sum0, 2);
+            sum0 = _mm_add_epi32(sum0, sum1);
+            res[1] = res[1] + _mm_cvtsi128_si32(sum0);
+
+            T10 = _mm_loadu_si128((__m128i*)(fref3 + (i + 0) * frefstride));
+            T11 = _mm_loadu_si128((__m128i*)(fref3 + (i + 1) * frefstride));
+            T12 = _mm_loadu_si128((__m128i*)(fref3 + (i + 2) * frefstride));
+            T13 = _mm_loadu_si128((__m128i*)(fref3 + (i + 3) * frefstride));
+
+            T20 = _mm_sad_epu8(T00, T10);
+            T21 = _mm_sad_epu8(T01, T11);
+            T22 = _mm_sad_epu8(T02, T12);
+            T23 = _mm_sad_epu8(T03, T13);
+
+            T20 = _mm_add_epi16(T20, T21);
+            T22 = _mm_add_epi16(T22, T23);
+            sum0 = _mm_add_epi16(T20, T22);
+
+            sum1 = _mm_shuffle_epi32(sum0, 2);
+            sum0 = _mm_add_epi32(sum0, sum1);
+            res[2] = res[2] + _mm_cvtsi128_si32(sum0);
+        }
+    }
+}
+
+#endif /* if INSTRSET >= X265_CPU_LEVEL_SSE41 */
+
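+// sad_x3_24: each 24-pixel row is one full 16-byte SAD plus a second load
+// masked to its low 8 bytes with cutoff(8), so the upper half contributes
+// nothing. The 16-bit accumulators are flushed to 32-bit sums every 16 rows,
+// which keeps every lane below the 65535 limit.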
+template<int ly>
+void sad_x3_24(pixel *fenc, pixel *fref1, pixel *fref2, pixel *fref3, intptr_t frefstride, int *res)
+{
+    Vec16uc m1, n1, n2, n3;
+
+    Vec4i sum1(0), sum2(0), sum3(0);
+    Vec8us sad1(0), sad2(0), sad3(0);
+    int max_iterators = (ly >> 4) << 4;
+    int row;
+
+    for (row = 0; row < max_iterators; row += 16)
+    {
+        for (int i = 0; i < 16; i++)
+        {
+            m1.load_a(fenc);
+            n1.load(fref1);
+            n2.load(fref2);
+            n3.load(fref3);
+
+            sad1.addSumAbsDiff(m1, n1);
+            sad2.addSumAbsDiff(m1, n2);
+            sad3.addSumAbsDiff(m1, n3);
+
+            m1.load_a(fenc + 16);
+            m1.cutoff(8);
+            n1.load(fref1 + 16);
+            n1.cutoff(8);
+            n2.load(fref2 + 16);
+            n2.cutoff(8);
+            n3.load(fref3 + 16);
+            n3.cutoff(8);
+
+            sad1.addSumAbsDiff(m1, n1);
+            sad2.addSumAbsDiff(m1, n2);
+            sad3.addSumAbsDiff(m1, n3);
+
+            fenc += FENC_STRIDE;
+            fref1 += frefstride;
+            fref2 += frefstride;
+            fref3 += frefstride;
+        }
+
+        sum1 += extend_low(sad1) + extend_high(sad1);
+        sum2 += extend_low(sad2) + extend_high(sad2);
+        sum3 += extend_low(sad3) + extend_high(sad3);
+        sad1 = 0;
+        sad2 = 0;
+        sad3 = 0;
+    }
+
+    while (row++ < ly)
+    {
+        m1.load_a(fenc);
+        n1.load(fref1);
+        n2.load(fref2);
+        n3.load(fref3);
+
+        sad1.addSumAbsDiff(m1, n1);
+        sad2.addSumAbsDiff(m1, n2);
+        sad3.addSumAbsDiff(m1, n3);
+
+        m1.load_a(fenc + 16);
+        m1.cutoff(8);
+        n1.load(fref1 + 16);
+        n1.cutoff(8);
+        n2.load(fref2 + 16);
+        n2.cutoff(8);
+        n3.load(fref3 + 16);
+        n3.cutoff(8);
+
+        sad1.addSumAbsDiff(m1, n1);
+        sad2.addSumAbsDiff(m1, n2);
+        sad3.addSumAbsDiff(m1, n3);
+
+        fenc += FENC_STRIDE;
+        fref1 += frefstride;
+        fref2 += frefstride;
+        fref3 += frefstride;
+    }
+
+    sum1 += extend_low(sad1) + extend_high(sad1);
+    sum2 += extend_low(sad2) + extend_high(sad2);
+    sum3 += extend_low(sad3) + extend_high(sad3);
+
+    res[0] = horizontal_add(sum1);
+    res[1] = horizontal_add(sum2);
+    res[2] = horizontal_add(sum3);
+}
+
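+// sad_x3_32: two full 16-byte SADs per row, flushed to 32-bit sums every
+// eight rows so the 16-bit accumulators stay well clear of overflow.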
+template<int ly>
+void sad_x3_32(pixel *fenc, pixel *fref1, pixel *fref2, pixel *fref3, intptr_t frefstride, int *res)
+{
+    Vec16uc m1, n1, n2, n3;
+
+    Vec4i sum1(0), sum2(0), sum3(0);
+    Vec8us sad1(0), sad2(0), sad3(0);
+    int max_iterators = (ly >> 3) << 3;
+    int row;
+
+    for (row = 0; row < max_iterators; row += 8)
+    {
+        for (int i = 0; i < 8; i++)
+        {
+            m1.load_a(fenc);
+            n1.load(fref1);
+            n2.load(fref2);
+            n3.load(fref3);
+
+            sad1.addSumAbsDiff(m1, n1);
+            sad2.addSumAbsDiff(m1, n2);
+            sad3.addSumAbsDiff(m1, n3);
+
+            m1.load_a(fenc + 16);
+            n1.load(fref1 + 16);
+            n2.load(fref2 + 16);
+            n3.load(fref3 + 16);
+
+            sad1.addSumAbsDiff(m1, n1);
+            sad2.addSumAbsDiff(m1, n2);
+            sad3.addSumAbsDiff(m1, n3);
+
+            fenc += FENC_STRIDE;
+            fref1 += frefstride;
+            fref2 += frefstride;
+            fref3 += frefstride;
+        }
+
+        sum1 += extend_low(sad1) + extend_high(sad1);
+        sum2 += extend_low(sad2) + extend_high(sad2);
+        sum3 += extend_low(sad3) + extend_high(sad3);
+        sad1 = 0;
+        sad2 = 0;
+        sad3 = 0;
+    }
+
+    while (row++ < ly)
+    {
+        m1.load_a(fenc);
+        n1.load(fref1);
+        n2.load(fref2);
+        n3.load(fref3);
+
+        sad1.addSumAbsDiff(m1, n1);
+        sad2.addSumAbsDiff(m1, n2);
+        sad3.addSumAbsDiff(m1, n3);
+
+        m1.load_a(fenc + 16);
+        n1.load(fref1 + 16);
+        n2.load(fref2 + 16);
+        n3.load(fref3 + 16);
+
+        sad1.addSumAbsDiff(m1, n1);
+        sad2.addSumAbsDiff(m1, n2);
+        sad3.addSumAbsDiff(m1, n3);
+
+        fenc += FENC_STRIDE;
+        fref1 += frefstride;
+        fref2 += frefstride;
+        fref3 += frefstride;
+    }
+
+    sum1 += extend_low(sad1) + extend_high(sad1);
+    sum2 += extend_low(sad2) + extend_high(sad2);
+    sum3 += extend_low(sad3) + extend_high(sad3);
+
+    res[0] = horizontal_add(sum1);
+    res[1] = horizontal_add(sum2);
+    res[2] = horizontal_add(sum3);
+}
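+// sad_x3_48: three 16-byte SADs per row. Here the eight-row flush is
+// required; sixteen rows of three SADs could exceed a 16-bit lane.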
+
+template<int ly>
+void sad_x3_48(pixel *fenc, pixel *fref1, pixel *fref2, pixel *fref3, intptr_t frefstride, int *res)
+{
+    Vec16uc m1, n1, n2, n3;
+
+    Vec4i sum1(0), sum2(0), sum3(0);
+    Vec8us sad1(0), sad2(0), sad3(0);
+    int max_iterators = (ly >> 3) << 3;
+    int row;
+
+    for (row = 0; row < max_iterators; row += 8)
+    {
+        for (int i = 0; i < 8; i++)
+        {
+            m1.load_a(fenc);
+            n1.load(fref1);
+            n2.load(fref2);
+            n3.load(fref3);
+
+            sad1.addSumAbsDiff(m1, n1);
+            sad2.addSumAbsDiff(m1, n2);
+            sad3.addSumAbsDiff(m1, n3);
+
+            m1.load_a(fenc + 16);
+            n1.load(fref1 + 16);
+            n2.load(fref2 + 16);
+            n3.load(fref3 + 16);
+
+            sad1.addSumAbsDiff(m1, n1);
+            sad2.addSumAbsDiff(m1, n2);
+            sad3.addSumAbsDiff(m1, n3);
+
+            m1.load_a(fenc + 32);
+            n1.load(fref1 + 32);
+            n2.load(fref2 + 32);
+            n3.load(fref3 + 32);
+
+            sad1.addSumAbsDiff(m1, n1);
+            sad2.addSumAbsDiff(m1, n2);
+            sad3.addSumAbsDiff(m1, n3);
+
+            fenc += FENC_STRIDE;
+            fref1 += frefstride;
+            fref2 += frefstride;
+            fref3 += frefstride;
+        }
+
+        sum1 += extend_low(sad1) + extend_high(sad1);
+        sum2 += extend_low(sad2) + extend_high(sad2);
+        sum3 += extend_low(sad3) + extend_high(sad3);
+        sad1 = 0;
+        sad2 = 0;
+        sad3 = 0;
+    }
+
+    while (row++ < ly)
+    {
+        m1.load_a(fenc);
+        n1.load(fref1);
+        n2.load(fref2);
+        n3.load(fref3);
+
+        sad1.addSumAbsDiff(m1, n1);
+        sad2.addSumAbsDiff(m1, n2);
+        sad3.addSumAbsDiff(m1, n3);
+
+        m1.load_a(fenc + 16);
+        n1.load(fref1 + 16);
+        n2.load(fref2 + 16);
+        n3.load(fref3 + 16);
+
+        sad1.addSumAbsDiff(m1, n1);
+        sad2.addSumAbsDiff(m1, n2);
+        sad3.addSumAbsDiff(m1, n3);
+
+        m1.load_a(fenc + 32);
+        n1.load(fref1 + 32);
+        n2.load(fref2 + 32);
+        n3.load(fref3 + 32);
+
+        sad1.addSumAbsDiff(m1, n1);
+        sad2.addSumAbsDiff(m1, n2);
+        sad3.addSumAbsDiff(m1, n3);
+
+        fenc += FENC_STRIDE;
+        fref1 += frefstride;
+        fref2 += frefstride;
+        fref3 += frefstride;
+    }
+
+    sum1 += extend_low(sad1) + extend_high(sad1);
+    sum2 += extend_low(sad2) + extend_high(sad2);
+    sum3 += extend_low(sad3) + extend_high(sad3);
+
+    res[0] = horizontal_add(sum1);
+    res[1] = horizontal_add(sum2);
+    res[2] = horizontal_add(sum3);
+}
+
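+// sad_x3_64: four 16-byte SADs per row, so partial sums are widened to
+// 32 bits every four rows.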
+template<int ly>
+void sad_x3_64(pixel *fenc, pixel *fref1, pixel *fref2, pixel *fref3, intptr_t frefstride, int *res)
+{
+    Vec16uc m1, n1, n2, n3;
+
+    Vec4i sum1(0), sum2(0), sum3(0);
+    Vec8us sad1(0), sad2(0), sad3(0);
+    int row;
+
+    for (row = 0; row < ly; row += 4)
+    {
+        for (int i = 0; i < 4; i++)
+        {
+            m1.load_a(fenc);
+            n1.load(fref1);
+            n2.load(fref2);
+            n3.load(fref3);
+
+            sad1.addSumAbsDiff(m1, n1);
+            sad2.addSumAbsDiff(m1, n2);
+            sad3.addSumAbsDiff(m1, n3);
+
+            m1.load_a(fenc + 16);
+            n1.load(fref1 + 16);
+            n2.load(fref2 + 16);
+            n3.load(fref3 + 16);
+
+            sad1.addSumAbsDiff(m1, n1);
+            sad2.addSumAbsDiff(m1, n2);
+            sad3.addSumAbsDiff(m1, n3);
+
+            m1.load_a(fenc + 32);
+            n1.load(fref1 + 32);
+            n2.load(fref2 + 32);
+            n3.load(fref3 + 32);
+
+            sad1.addSumAbsDiff(m1, n1);
+            sad2.addSumAbsDiff(m1, n2);
+            sad3.addSumAbsDiff(m1, n3);
+
+            m1.load_a(fenc + 48);
+            n1.load(fref1 + 48);
+            n2.load(fref2 + 48);
+            n3.load(fref3 + 48);
+
+            sad1.addSumAbsDiff(m1, n1);
+            sad2.addSumAbsDiff(m1, n2);
+            sad3.addSumAbsDiff(m1, n3);
+
+            fenc += FENC_STRIDE;
+            fref1 += frefstride;
+            fref2 += frefstride;
+            fref3 += frefstride;
+        }
+
+        sum1 += extend_low(sad1) + extend_high(sad1);
+        sum2 += extend_low(sad2) + extend_high(sad2);
+        sum3 += extend_low(sad3) + extend_high(sad3);
+        sad1 = 0;
+        sad2 = 0;
+        sad3 = 0;
+    }
+
+    res[0] = horizontal_add(sum1);
+    res[1] = horizontal_add(sum2);
+    res[2] = horizontal_add(sum3);
+}
+
+#if INSTRSET >= X265_CPU_LEVEL_SSE41
+#if HAVE_MMX
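+// MMX sad_x4_4: the same row-by-row _mm_sad_pu8 pattern as the x3 variants,
+// extended with a fourth reference accumulated into sum3.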
+template<int ly>
+void sad_x4_4(pixel *fenc, pixel *fref1, pixel *fref2, pixel *fref3, pixel *fref4, intptr_t frefstride, int *res)
+{
+    assert((ly % 4) == 0);
+
+    __m64 sum0 = _mm_setzero_si64();
+    __m64 sum1 = _mm_setzero_si64();
+    __m64 sum2 = _mm_setzero_si64();
+    __m64 sum3 = _mm_setzero_si64();
+
+    __m64 T00, T01, T02, T03, T04, T05, T06, T07;
+    __m64 T0, T1, T2, T3, T4, T5, T6, T7;
+    __m64 T10, T11, T12, T13, T14, T15, T16, T17;
+    __m64 T20, T21, T22, T23, T24, T25, T26, T27;
+
+    if (ly == 4)
+    {
         T00 = _mm_cvtsi32_si64(*(int*)(fenc + 0 * FENC_STRIDE));
         T01 = _mm_cvtsi32_si64(*(int*)(fenc + 1 * FENC_STRIDE));
         T02 = _mm_cvtsi32_si64(*(int*)(fenc + 2 * FENC_STRIDE));
@@ -1392,6 +4438,21 @@
         sum2 = _mm_add_pi16(sum2, T21);
         sum2 = _mm_add_pi16(sum2, T22);
         sum2 = _mm_add_pi16(sum2, T23);
+
+        T10 = _mm_cvtsi32_si64(*(int*)(fref4 + 0 * frefstride));
+        T11 = _mm_cvtsi32_si64(*(int*)(fref4 + 1 * frefstride));
+        T12 = _mm_cvtsi32_si64(*(int*)(fref4 + 2 * frefstride));
+        T13 = _mm_cvtsi32_si64(*(int*)(fref4 + 3 * frefstride));
+
+        T20 = _mm_sad_pu8(T00, T10);
+        T21 = _mm_sad_pu8(T01, T11);
+        T22 = _mm_sad_pu8(T02, T12);
+        T23 = _mm_sad_pu8(T03, T13);
+
+        sum3 = _mm_add_pi16(sum3, T20);
+        sum3 = _mm_add_pi16(sum3, T21);
+        sum3 = _mm_add_pi16(sum3, T22);
+        sum3 = _mm_add_pi16(sum3, T23);
     }
     else if (ly == 8)
     {
@@ -1484,6 +4545,33 @@
         sum2 = _mm_add_pi16(sum2, T25);
         sum2 = _mm_add_pi16(sum2, T26);
         sum2 = _mm_add_pi16(sum2, T27);
+
+        T10 = _mm_cvtsi32_si64(*(int*)(fref4 + 0 * frefstride));
+        T11 = _mm_cvtsi32_si64(*(int*)(fref4 + 1 * frefstride));
+        T12 = _mm_cvtsi32_si64(*(int*)(fref4 + 2 * frefstride));
+        T13 = _mm_cvtsi32_si64(*(int*)(fref4 + 3 * frefstride));
+        T14 = _mm_cvtsi32_si64(*(int*)(fref4 + 4 * frefstride));
+        T15 = _mm_cvtsi32_si64(*(int*)(fref4 + 5 * frefstride));
+        T16 = _mm_cvtsi32_si64(*(int*)(fref4 + 6 * frefstride));
+        T17 = _mm_cvtsi32_si64(*(int*)(fref4 + 7 * frefstride));
+
+        T20 = _mm_sad_pu8(T00, T10);
+        T21 = _mm_sad_pu8(T01, T11);
+        T22 = _mm_sad_pu8(T02, T12);
+        T23 = _mm_sad_pu8(T03, T13);
+        T24 = _mm_sad_pu8(T04, T14);
+        T25 = _mm_sad_pu8(T05, T15);
+        T26 = _mm_sad_pu8(T06, T16);
+        T27 = _mm_sad_pu8(T07, T17);
+
+        sum3 = _mm_add_pi16(sum3, T20);
+        sum3 = _mm_add_pi16(sum3, T21);
+        sum3 = _mm_add_pi16(sum3, T22);
+        sum3 = _mm_add_pi16(sum3, T23);
+        sum3 = _mm_add_pi16(sum3, T24);
+        sum3 = _mm_add_pi16(sum3, T25);
+        sum3 = _mm_add_pi16(sum3, T26);
+        sum3 = _mm_add_pi16(sum3, T27);
     }
     else if (ly == 16)
     {
@@ -1665,6 +4753,60 @@
         sum2 = _mm_add_pi16(sum2, T25);
         sum2 = _mm_add_pi16(sum2, T26);
         sum2 = _mm_add_pi16(sum2, T27);
+
+        T10 = _mm_cvtsi32_si64(*(int*)(fref4 + 0 * frefstride));
+        T11 = _mm_cvtsi32_si64(*(int*)(fref4 + 1 * frefstride));
+        T12 = _mm_cvtsi32_si64(*(int*)(fref4 + 2 * frefstride));
+        T13 = _mm_cvtsi32_si64(*(int*)(fref4 + 3 * frefstride));
+        T14 = _mm_cvtsi32_si64(*(int*)(fref4 + 4 * frefstride));
+        T15 = _mm_cvtsi32_si64(*(int*)(fref4 + 5 * frefstride));
+        T16 = _mm_cvtsi32_si64(*(int*)(fref4 + 6 * frefstride));
+        T17 = _mm_cvtsi32_si64(*(int*)(fref4 + 7 * frefstride));
+
+        T20 = _mm_sad_pu8(T00, T10);
+        T21 = _mm_sad_pu8(T01, T11);
+        T22 = _mm_sad_pu8(T02, T12);
+        T23 = _mm_sad_pu8(T03, T13);
+        T24 = _mm_sad_pu8(T04, T14);
+        T25 = _mm_sad_pu8(T05, T15);
+        T26 = _mm_sad_pu8(T06, T16);
+        T27 = _mm_sad_pu8(T07, T17);
+
+        sum3 = _mm_add_pi16(sum3, T20);
+        sum3 = _mm_add_pi16(sum3, T21);
+        sum3 = _mm_add_pi16(sum3, T22);
+        sum3 = _mm_add_pi16(sum3, T23);
+        sum3 = _mm_add_pi16(sum3, T24);
+        sum3 = _mm_add_pi16(sum3, T25);
+        sum3 = _mm_add_pi16(sum3, T26);
+        sum3 = _mm_add_pi16(sum3, T27);
+
+        T10 = _mm_cvtsi32_si64(*(int*)(fref4 + 8 * frefstride));
+        T11 = _mm_cvtsi32_si64(*(int*)(fref4 + 9 * frefstride));
+        T12 = _mm_cvtsi32_si64(*(int*)(fref4 + 10 * frefstride));
+        T13 = _mm_cvtsi32_si64(*(int*)(fref4 + 11 * frefstride));
+        T14 = _mm_cvtsi32_si64(*(int*)(fref4 + 12 * frefstride));
+        T15 = _mm_cvtsi32_si64(*(int*)(fref4 + 13 * frefstride));
+        T16 = _mm_cvtsi32_si64(*(int*)(fref4 + 14 * frefstride));
+        T17 = _mm_cvtsi32_si64(*(int*)(fref4 + 15 * frefstride));
+
+        T20 = _mm_sad_pu8(T0, T10);
+        T21 = _mm_sad_pu8(T1, T11);
+        T22 = _mm_sad_pu8(T2, T12);
+        T23 = _mm_sad_pu8(T3, T13);
+        T24 = _mm_sad_pu8(T4, T14);
+        T25 = _mm_sad_pu8(T5, T15);
+        T26 = _mm_sad_pu8(T6, T16);
+        T27 = _mm_sad_pu8(T7, T17);
+
+        sum3 = _mm_add_pi16(sum3, T20);
+        sum3 = _mm_add_pi16(sum3, T21);
+        sum3 = _mm_add_pi16(sum3, T22);
+        sum3 = _mm_add_pi16(sum3, T23);
+        sum3 = _mm_add_pi16(sum3, T24);
+        sum3 = _mm_add_pi16(sum3, T25);
+        sum3 = _mm_add_pi16(sum3, T26);
+        sum3 = _mm_add_pi16(sum3, T27);
     }
     else if ((ly % 8) == 0)
     {
@@ -1759,6 +4901,33 @@
             sum2 = _mm_add_pi16(sum2, T25);
             sum2 = _mm_add_pi16(sum2, T26);
             sum2 = _mm_add_pi16(sum2, T27);
+
+            T10 = _mm_cvtsi32_si64(*(int*)(fref4 + (i + 0) * frefstride));
+            T11 = _mm_cvtsi32_si64(*(int*)(fref4 + (i + 1) * frefstride));
+            T12 = _mm_cvtsi32_si64(*(int*)(fref4 + (i + 2) * frefstride));
+            T13 = _mm_cvtsi32_si64(*(int*)(fref4 + (i + 3) * frefstride));
+            T14 = _mm_cvtsi32_si64(*(int*)(fref4 + (i + 4) * frefstride));
+            T15 = _mm_cvtsi32_si64(*(int*)(fref4 + (i + 5) * frefstride));
+            T16 = _mm_cvtsi32_si64(*(int*)(fref4 + (i + 6) * frefstride));
+            T17 = _mm_cvtsi32_si64(*(int*)(fref4 + (i + 7) * frefstride));
+
+            T20 = _mm_sad_pu8(T00, T10);
+            T21 = _mm_sad_pu8(T01, T11);
+            T22 = _mm_sad_pu8(T02, T12);
+            T23 = _mm_sad_pu8(T03, T13);
+            T24 = _mm_sad_pu8(T04, T14);
+            T25 = _mm_sad_pu8(T05, T15);
+            T26 = _mm_sad_pu8(T06, T16);
+            T27 = _mm_sad_pu8(T07, T17);
+
+            sum3 = _mm_add_pi16(sum3, T20);
+            sum3 = _mm_add_pi16(sum3, T21);
+            sum3 = _mm_add_pi16(sum3, T22);
+            sum3 = _mm_add_pi16(sum3, T23);
+            sum3 = _mm_add_pi16(sum3, T24);
+            sum3 = _mm_add_pi16(sum3, T25);
+            sum3 = _mm_add_pi16(sum3, T26);
+            sum3 = _mm_add_pi16(sum3, T27);
         }
     }
     else
@@ -1814,26 +4983,44 @@
             sum2 = _mm_add_pi16(sum2, T21);
             sum2 = _mm_add_pi16(sum2, T22);
             sum2 = _mm_add_pi16(sum2, T23);
+
+            T10 = _mm_cvtsi32_si64(*(int*)(fref4 + (i + 0) * frefstride));
+            T11 = _mm_cvtsi32_si64(*(int*)(fref4 + (i + 1) * frefstride));
+            T12 = _mm_cvtsi32_si64(*(int*)(fref4 + (i + 2) * frefstride));
+            T13 = _mm_cvtsi32_si64(*(int*)(fref4 + (i + 3) * frefstride));
+
+            T20 = _mm_sad_pu8(T00, T10);
+            T21 = _mm_sad_pu8(T01, T11);
+            T22 = _mm_sad_pu8(T02, T12);
+            T23 = _mm_sad_pu8(T03, T13);
+
+            sum3 = _mm_add_pi16(sum3, T20);
+            sum3 = _mm_add_pi16(sum3, T21);
+            sum3 = _mm_add_pi16(sum3, T22);
+            sum3 = _mm_add_pi16(sum3, T23);
         }
     }
+
     res[0] = _m_to_int(sum0);
     res[1] = _m_to_int(sum1);
     res[2] = _m_to_int(sum2);
+    res[3] = _m_to_int(sum3);
 }
 
 #else /* if HAVE_MMX */
 
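+// SSE2 sad_x4_4: four 4-pixel rows are packed into a single XMM register
+// (fenc into R00, the references into R01..R04) so one psadbw covers an
+// entire 4x4 block per reference.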
 template<int ly>
-void sad_x3_4(pixel *fenc, pixel *fref1, pixel *fref2, pixel *fref3, intptr_t frefstride, int *res)
+void sad_x4_4(pixel *fenc, pixel *fref1, pixel *fref2, pixel *fref3, pixel *fref4, intptr_t frefstride, int *res)
 {
     assert((ly % 4) == 0);
     __m128i sum0 = _mm_setzero_si128();
     __m128i sum1 = _mm_setzero_si128();
     __m128i sum2 = _mm_setzero_si128();
+    __m128i sum3 = _mm_setzero_si128();
 
     __m128i T00, T01, T02, T03;
     __m128i T10, T11, T12, T13;
-    __m128i R00, R01, R02, R03;
+    __m128i R00, R01, R02, R03, R04;
     __m128i T20;
 
     if (ly == 4)
@@ -1870,6 +5057,14 @@
         T13 = _mm_unpacklo_epi32(T12, T13);
         R03 = _mm_unpacklo_epi64(T11, T13);
 
+        T10 = _mm_loadl_epi64((__m128i*)(fref4 + (0) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref4 + (1) * frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref4 + (2) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref4 + (3) * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R04 = _mm_unpacklo_epi64(T11, T13);
+
         T20 = _mm_sad_epu8(R00, R01);
         sum0 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
 
@@ -1878,6 +5073,9 @@
 
         T20 = _mm_sad_epu8(R00, R03);
         sum2 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+
+        T20 = _mm_sad_epu8(R00, R04);
+        sum3 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
     }
     else if (ly == 8)
     {
@@ -1913,6 +5111,14 @@
         T13 = _mm_unpacklo_epi32(T12, T13);
         R03 = _mm_unpacklo_epi64(T11, T13);
 
+        T10 = _mm_loadl_epi64((__m128i*)(fref4 + (0) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref4 + (1) * frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref4 + (2) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref4 + (3) * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R04 = _mm_unpacklo_epi64(T11, T13);
+
         T20 = _mm_sad_epu8(R00, R01);
         sum0 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
 
@@ -1922,6 +5128,9 @@
         T20 = _mm_sad_epu8(R00, R03);
         sum2 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
 
+        T20 = _mm_sad_epu8(R00, R04);
+        sum3 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+
         T00 = _mm_loadl_epi64((__m128i*)(fenc + (4) * FENC_STRIDE));
         T01 = _mm_loadl_epi64((__m128i*)(fenc + (5) * FENC_STRIDE));
         T01 = _mm_unpacklo_epi32(T00, T01);
@@ -1954,6 +5163,14 @@
         T13 = _mm_unpacklo_epi32(T12, T13);
         R03 = _mm_unpacklo_epi64(T11, T13);
 
+        T10 = _mm_loadl_epi64((__m128i*)(fref4 + (4) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref4 + (5) * frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref4 + (6) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref4 + (7) * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R04 = _mm_unpacklo_epi64(T11, T13);
+
         T20 = _mm_sad_epu8(R00, R01);
         T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
         sum0 = _mm_add_epi32(sum0, T20);
@@ -1965,6 +5182,10 @@
         T20 = _mm_sad_epu8(R00, R03);
         T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
         sum2 = _mm_add_epi32(sum2, T20);
+
+        T20 = _mm_sad_epu8(R00, R04);
+        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+        sum3 = _mm_add_epi32(sum3, T20);
     }
     else if (ly == 16)
     {
@@ -2000,6 +5221,14 @@
         T13 = _mm_unpacklo_epi32(T12, T13);
         R03 = _mm_unpacklo_epi64(T11, T13);
 
+        T10 = _mm_loadl_epi64((__m128i*)(fref4 + (0) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref4 + (1) * frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref4 + (2) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref4 + (3) * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R04 = _mm_unpacklo_epi64(T11, T13);
+
         T20 = _mm_sad_epu8(R00, R01);
         sum0 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
 
@@ -2009,6 +5238,9 @@
         T20 = _mm_sad_epu8(R00, R03);
         sum2 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
 
+        T20 = _mm_sad_epu8(R00, R04);
+        sum3 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+
         T00 = _mm_loadl_epi64((__m128i*)(fenc + (4) * FENC_STRIDE));
         T01 = _mm_loadl_epi64((__m128i*)(fenc + (5) * FENC_STRIDE));
         T01 = _mm_unpacklo_epi32(T00, T01);
@@ -2041,6 +5273,14 @@
         T13 = _mm_unpacklo_epi32(T12, T13);
         R03 = _mm_unpacklo_epi64(T11, T13);
 
+        T10 = _mm_loadl_epi64((__m128i*)(fref4 + (4) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref4 + (5) * frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref4 + (6) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref4 + (7) * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R04 = _mm_unpacklo_epi64(T11, T13);
+
         T20 = _mm_sad_epu8(R00, R01);
         T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
         sum0 = _mm_add_epi32(sum0, T20);
@@ -2053,6 +5293,10 @@
         T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
         sum2 = _mm_add_epi32(sum2, T20);
 
+        T20 = _mm_sad_epu8(R00, R04);
+        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+        sum3 = _mm_add_epi32(sum3, T20);
+
         T00 = _mm_loadl_epi64((__m128i*)(fenc + (8) * FENC_STRIDE));
         T01 = _mm_loadl_epi64((__m128i*)(fenc + (9) * FENC_STRIDE));
         T01 = _mm_unpacklo_epi32(T00, T01);
@@ -2085,6 +5329,14 @@
         T13 = _mm_unpacklo_epi32(T12, T13);
         R03 = _mm_unpacklo_epi64(T11, T13);
 
+        T10 = _mm_loadl_epi64((__m128i*)(fref4 + (8) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref4 + (9) * frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref4 + (10) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref4 + (11) * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R04 = _mm_unpacklo_epi64(T11, T13);
+
         T20 = _mm_sad_epu8(R00, R01);
         T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
         sum0 = _mm_add_epi32(sum0, T20);
@@ -2097,6 +5349,10 @@
         T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
         sum2 = _mm_add_epi32(sum2, T20);
 
+        T20 = _mm_sad_epu8(R00, R04);
+        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+        sum3 = _mm_add_epi32(sum3, T20);
+
         T00 = _mm_loadl_epi64((__m128i*)(fenc + (12) * FENC_STRIDE));
         T01 = _mm_loadl_epi64((__m128i*)(fenc + (13) * FENC_STRIDE));
         T01 = _mm_unpacklo_epi32(T00, T01);
@@ -2129,6 +5385,14 @@
         T13 = _mm_unpacklo_epi32(T12, T13);
         R03 = _mm_unpacklo_epi64(T11, T13);
 
+        T10 = _mm_loadl_epi64((__m128i*)(fref4 + (12) * frefstride));
+        T11 = _mm_loadl_epi64((__m128i*)(fref4 + (13) * frefstride));
+        T11 = _mm_unpacklo_epi32(T10, T11);
+        T12 = _mm_loadl_epi64((__m128i*)(fref4 + (14) * frefstride));
+        T13 = _mm_loadl_epi64((__m128i*)(fref4 + (15) * frefstride));
+        T13 = _mm_unpacklo_epi32(T12, T13);
+        R04 = _mm_unpacklo_epi64(T11, T13);
+
         T20 = _mm_sad_epu8(R00, R01);
         T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
         sum0 = _mm_add_epi32(sum0, T20);
@@ -2140,6 +5404,10 @@
         T20 = _mm_sad_epu8(R00, R03);
         T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
         sum2 = _mm_add_epi32(sum2, T20);
+
+        T20 = _mm_sad_epu8(R00, R04);
+        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+        sum3 = _mm_add_epi32(sum3, T20);
     }
     else if ((ly % 8) == 0)
     {
@@ -2177,6 +5445,14 @@
             T13 = _mm_unpacklo_epi32(T12, T13);
             R03 = _mm_unpacklo_epi64(T11, T13);
 
+            T10 = _mm_loadl_epi64((__m128i*)(fref4 + (i + 0) * frefstride));
+            T11 = _mm_loadl_epi64((__m128i*)(fref4 + (i + 1) * frefstride));
+            T11 = _mm_unpacklo_epi32(T10, T11);
+            T12 = _mm_loadl_epi64((__m128i*)(fref4 + (i + 2) * frefstride));
+            T13 = _mm_loadl_epi64((__m128i*)(fref4 + (i + 3) * frefstride));
+            T13 = _mm_unpacklo_epi32(T12, T13);
+            R04 = _mm_unpacklo_epi64(T11, T13);
+
             T20 = _mm_sad_epu8(R00, R01);
             T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
             sum0 = _mm_add_epi32(sum0, T20);
@@ -2189,6 +5465,10 @@
             T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
             sum2 = _mm_add_epi32(sum2, T20);
 
+            T20 = _mm_sad_epu8(R00, R04);
+            T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+            sum3 = _mm_add_epi32(sum3, T20);
+
             T00 = _mm_loadl_epi64((__m128i*)(fenc + (i + 4) * FENC_STRIDE));
             T01 = _mm_loadl_epi64((__m128i*)(fenc + (i + 5) * FENC_STRIDE));
             T01 = _mm_unpacklo_epi32(T00, T01);
@@ -2221,6 +5501,14 @@
             T13 = _mm_unpacklo_epi32(T12, T13);
             R03 = _mm_unpacklo_epi64(T11, T13);
 
+            T10 = _mm_loadl_epi64((__m128i*)(fref4 + (i + 4) * frefstride));
+            T11 = _mm_loadl_epi64((__m128i*)(fref4 + (i + 5) * frefstride));
+            T11 = _mm_unpacklo_epi32(T10, T11);
+            T12 = _mm_loadl_epi64((__m128i*)(fref4 + (i + 6) * frefstride));
+            T13 = _mm_loadl_epi64((__m128i*)(fref4 + (i + 7) * frefstride));
+            T13 = _mm_unpacklo_epi32(T12, T13);
+            R04 = _mm_unpacklo_epi64(T11, T13);
+
             T20 = _mm_sad_epu8(R00, R01);
             T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
             sum0 = _mm_add_epi32(sum0, T20);
@@ -2232,6 +5520,10 @@
             T20 = _mm_sad_epu8(R00, R03);
             T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
             sum2 = _mm_add_epi32(sum2, T20);
+
+            T20 = _mm_sad_epu8(R00, R04);
+            T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+            sum3 = _mm_add_epi32(sum3, T20);
         }
     }
     else
@@ -2270,6 +5562,14 @@
             T13 = _mm_unpacklo_epi32(T12, T13);
             R03 = _mm_unpacklo_epi64(T11, T13);
 
+            T10 = _mm_loadl_epi64((__m128i*)(fref4 + (i + 0) * frefstride));
+            T11 = _mm_loadl_epi64((__m128i*)(fref4 + (i + 1) * frefstride));
+            T11 = _mm_unpacklo_epi32(T10, T11);
+            T12 = _mm_loadl_epi64((__m128i*)(fref4 + (i + 2) * frefstride));
+            T13 = _mm_loadl_epi64((__m128i*)(fref4 + (i + 3) * frefstride));
+            T13 = _mm_unpacklo_epi32(T12, T13);
+            R04 = _mm_unpacklo_epi64(T11, T13);
+
             T20 = _mm_sad_epu8(R00, R01);
             T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
             sum0 = _mm_add_epi32(sum0, T20);
@@ -2281,32 +5581,37 @@
             T20 = _mm_sad_epu8(R00, R03);
             T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
             sum2 = _mm_add_epi32(sum2, T20);
+
+            T20 = _mm_sad_epu8(R00, R04);
+            T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+            sum3 = _mm_add_epi32(sum3, T20);
         }
     }
-
     res[0] = _mm_cvtsi128_si32(sum0);
     res[1] = _mm_cvtsi128_si32(sum1);
     res[2] = _mm_cvtsi128_si32(sum2);
+    res[3] = _mm_cvtsi128_si32(sum3);
 }
 
 #endif /* if HAVE_MMX */
 
 #if HAVE_MMX
 template<int ly>
-void sad_x3_8(pixel *fenc, pixel *fref1, pixel *fref2, pixel *fref3, intptr_t frefstride, int *res)
+void sad_x4_8(pixel *fenc, pixel *fref1, pixel *fref2, pixel *fref3, pixel *fref4, intptr_t frefstride, int *res)
 {
     assert((ly % 4) == 0);
 
     __m64 sum0 = _mm_setzero_si64();
     __m64 sum1 = _mm_setzero_si64();
     __m64 sum2 = _mm_setzero_si64();
+    __m64 sum3 = _mm_setzero_si64();
 
     __m64 T00, T01, T02, T03, T04, T05, T06, T07;
     __m64 T0, T1, T2, T3, T4, T5, T6, T7;
     __m64 T10, T11, T12, T13, T14, T15, T16, T17;
     __m64 T20, T21, T22, T23, T24, T25, T26, T27;
 
     if (ly == 4)
     {
         T00 = (*(__m64*)(fenc + 0 * FENC_STRIDE));
         T01 = (*(__m64*)(fenc + 1 * FENC_STRIDE));
@@ -2357,8 +5662,23 @@
         sum2 = _mm_add_pi16(sum2, T21);
         sum2 = _mm_add_pi16(sum2, T22);
         sum2 = _mm_add_pi16(sum2, T23);
-    }
-    else if (ly == 8)
+
+        T10 = (*(__m64*)(fref4 + 0 * frefstride));
+        T11 = (*(__m64*)(fref4 + 1 * frefstride));
+        T12 = (*(__m64*)(fref4 + 2 * frefstride));
+        T13 = (*(__m64*)(fref4 + 3 * frefstride));
+
+        T20 = _mm_sad_pu8(T00, T10);
+        T21 = _mm_sad_pu8(T01, T11);
+        T22 = _mm_sad_pu8(T02, T12);
+        T23 = _mm_sad_pu8(T03, T13);
+
+        sum3 = _mm_add_pi16(sum3, T20);
+        sum3 = _mm_add_pi16(sum3, T21);
+        sum3 = _mm_add_pi16(sum3, T22);
+        sum3 = _mm_add_pi16(sum3, T23);
+    }
+    else if (ly == 8)
     {
         T00 = (*(__m64*)(fenc + 0 * FENC_STRIDE));
         T01 = (*(__m64*)(fenc + 1 * FENC_STRIDE));
@@ -2449,8 +5769,35 @@
         sum2 = _mm_add_pi16(sum2, T25);
         sum2 = _mm_add_pi16(sum2, T26);
         sum2 = _mm_add_pi16(sum2, T27);
-    }
-    else if (ly == 16)
+
+        T10 = (*(__m64*)(fref4 + 0 * frefstride));
+        T11 = (*(__m64*)(fref4 + 1 * frefstride));
+        T12 = (*(__m64*)(fref4 + 2 * frefstride));
+        T13 = (*(__m64*)(fref4 + 3 * frefstride));
+        T14 = (*(__m64*)(fref4 + 4 * frefstride));
+        T15 = (*(__m64*)(fref4 + 5 * frefstride));
+        T16 = (*(__m64*)(fref4 + 6 * frefstride));
+        T17 = (*(__m64*)(fref4 + 7 * frefstride));
+
+        T20 = _mm_sad_pu8(T00, T10);
+        T21 = _mm_sad_pu8(T01, T11);
+        T22 = _mm_sad_pu8(T02, T12);
+        T23 = _mm_sad_pu8(T03, T13);
+        T24 = _mm_sad_pu8(T04, T14);
+        T25 = _mm_sad_pu8(T05, T15);
+        T26 = _mm_sad_pu8(T06, T16);
+        T27 = _mm_sad_pu8(T07, T17);
+
+        sum3 = _mm_add_pi16(sum3, T20);
+        sum3 = _mm_add_pi16(sum3, T21);
+        sum3 = _mm_add_pi16(sum3, T22);
+        sum3 = _mm_add_pi16(sum3, T23);
+        sum3 = _mm_add_pi16(sum3, T24);
+        sum3 = _mm_add_pi16(sum3, T25);
+        sum3 = _mm_add_pi16(sum3, T26);
+        sum3 = _mm_add_pi16(sum3, T27);
+    }
+    else if (ly == 16)
     {
         T00 = (*(__m64*)(fenc + 0 * FENC_STRIDE));
         T01 = (*(__m64*)(fenc + 1 * FENC_STRIDE));
@@ -2630,3328 +5977,6 @@
         sum2 = _mm_add_pi16(sum2, T25);
         sum2 = _mm_add_pi16(sum2, T26);
         sum2 = _mm_add_pi16(sum2, T27);
-    }
-    else if ((ly % 8) == 0)
-    {
-        for (int i = 0; i < ly; i += 8)
-        {
-            T00 = (*(__m64*)(fenc + (i + 0) * FENC_STRIDE));
-            T01 = (*(__m64*)(fenc + (i + 1) * FENC_STRIDE));
-            T02 = (*(__m64*)(fenc + (i + 2) * FENC_STRIDE));
-            T03 = (*(__m64*)(fenc + (i + 3) * FENC_STRIDE));
-            T04 = (*(__m64*)(fenc + (i + 4) * FENC_STRIDE));
-            T05 = (*(__m64*)(fenc + (i + 5) * FENC_STRIDE));
-            T06 = (*(__m64*)(fenc + (i + 6) * FENC_STRIDE));
-            T07 = (*(__m64*)(fenc + (i + 7) * FENC_STRIDE));
-
-            T10 = (*(__m64*)(fref1 + (i + 0) * frefstride));
-            T11 = (*(__m64*)(fref1 + (i + 1) * frefstride));
-            T12 = (*(__m64*)(fref1 + (i + 2) * frefstride));
-            T13 = (*(__m64*)(fref1 + (i + 3) * frefstride));
-            T14 = (*(__m64*)(fref1 + (i + 4) * frefstride));
-            T15 = (*(__m64*)(fref1 + (i + 5) * frefstride));
-            T16 = (*(__m64*)(fref1 + (i + 6) * frefstride));
-            T17 = (*(__m64*)(fref1 + (i + 7) * frefstride));
-
-            T20 = _mm_sad_pu8(T00, T10);
-            T21 = _mm_sad_pu8(T01, T11);
-            T22 = _mm_sad_pu8(T02, T12);
-            T23 = _mm_sad_pu8(T03, T13);
-            T24 = _mm_sad_pu8(T04, T14);
-            T25 = _mm_sad_pu8(T05, T15);
-            T26 = _mm_sad_pu8(T06, T16);
-            T27 = _mm_sad_pu8(T07, T17);
-
-            sum0 = _mm_add_pi16(sum0, T20);
-            sum0 = _mm_add_pi16(sum0, T21);
-            sum0 = _mm_add_pi16(sum0, T22);
-            sum0 = _mm_add_pi16(sum0, T23);
-            sum0 = _mm_add_pi16(sum0, T24);
-            sum0 = _mm_add_pi16(sum0, T25);
-            sum0 = _mm_add_pi16(sum0, T26);
-            sum0 = _mm_add_pi16(sum0, T27);
-
-            T10 = (*(__m64*)(fref2 + (i + 0) * frefstride));
-            T11 = (*(__m64*)(fref2 + (i + 1) * frefstride));
-            T12 = (*(__m64*)(fref2 + (i + 2) * frefstride));
-            T13 = (*(__m64*)(fref2 + (i + 3) * frefstride));
-            T14 = (*(__m64*)(fref2 + (i + 4) * frefstride));
-            T15 = (*(__m64*)(fref2 + (i + 5) * frefstride));
-            T16 = (*(__m64*)(fref2 + (i + 6) * frefstride));
-            T17 = (*(__m64*)(fref2 + (i + 7) * frefstride));
-
-            T20 = _mm_sad_pu8(T00, T10);
-            T21 = _mm_sad_pu8(T01, T11);
-            T22 = _mm_sad_pu8(T02, T12);
-            T23 = _mm_sad_pu8(T03, T13);
-            T24 = _mm_sad_pu8(T04, T14);
-            T25 = _mm_sad_pu8(T05, T15);
-            T26 = _mm_sad_pu8(T06, T16);
-            T27 = _mm_sad_pu8(T07, T17);
-
-            sum1 = _mm_add_pi16(sum1, T20);
-            sum1 = _mm_add_pi16(sum1, T21);
-            sum1 = _mm_add_pi16(sum1, T22);
-            sum1 = _mm_add_pi16(sum1, T23);
-            sum1 = _mm_add_pi16(sum1, T24);
-            sum1 = _mm_add_pi16(sum1, T25);
-            sum1 = _mm_add_pi16(sum1, T26);
-            sum1 = _mm_add_pi16(sum1, T27);
-
-            T10 = (*(__m64*)(fref3 + (i + 0) * frefstride));
-            T11 = (*(__m64*)(fref3 + (i + 1) * frefstride));
-            T12 = (*(__m64*)(fref3 + (i + 2) * frefstride));
-            T13 = (*(__m64*)(fref3 + (i + 3) * frefstride));
-            T14 = (*(__m64*)(fref3 + (i + 4) * frefstride));
-            T15 = (*(__m64*)(fref3 + (i + 5) * frefstride));
-            T16 = (*(__m64*)(fref3 + (i + 6) * frefstride));
-            T17 = (*(__m64*)(fref3 + (i + 7) * frefstride));
-
-            T20 = _mm_sad_pu8(T00, T10);
-            T21 = _mm_sad_pu8(T01, T11);
-            T22 = _mm_sad_pu8(T02, T12);
-            T23 = _mm_sad_pu8(T03, T13);
-            T24 = _mm_sad_pu8(T04, T14);
-            T25 = _mm_sad_pu8(T05, T15);
-            T26 = _mm_sad_pu8(T06, T16);
-            T27 = _mm_sad_pu8(T07, T17);
-
-            sum2 = _mm_add_pi16(sum2, T20);
-            sum2 = _mm_add_pi16(sum2, T21);
-            sum2 = _mm_add_pi16(sum2, T22);
-            sum2 = _mm_add_pi16(sum2, T23);
-            sum2 = _mm_add_pi16(sum2, T24);
-            sum2 = _mm_add_pi16(sum2, T25);
-            sum2 = _mm_add_pi16(sum2, T26);
-            sum2 = _mm_add_pi16(sum2, T27);
-        }
-    }
-    else
-    {
-        for (int i = 0; i < ly; i += 4)
-        {
-            T00 = (*(__m64*)(fenc + (i + 0) * FENC_STRIDE));
-            T01 = (*(__m64*)(fenc + (i + 1) * FENC_STRIDE));
-            T02 = (*(__m64*)(fenc + (i + 2) * FENC_STRIDE));
-            T03 = (*(__m64*)(fenc + (i + 3) * FENC_STRIDE));
-
-            T10 = (*(__m64*)(fref1 + (i + 0) * frefstride));
-            T11 = (*(__m64*)(fref1 + (i + 1) * frefstride));
-            T12 = (*(__m64*)(fref1 + (i + 2) * frefstride));
-            T13 = (*(__m64*)(fref1 + (i + 3) * frefstride));
-
-            T20 = _mm_sad_pu8(T00, T10);
-            T21 = _mm_sad_pu8(T01, T11);
-            T22 = _mm_sad_pu8(T02, T12);
-            T23 = _mm_sad_pu8(T03, T13);
-
-            sum0 = _mm_add_pi16(sum0, T20);
-            sum0 = _mm_add_pi16(sum0, T21);
-            sum0 = _mm_add_pi16(sum0, T22);
-            sum0 = _mm_add_pi16(sum0, T23);
-
-            T10 = (*(__m64*)(fref2 + (i + 0) * frefstride));
-            T11 = (*(__m64*)(fref2 + (i + 1) * frefstride));
-            T12 = (*(__m64*)(fref2 + (i + 2) * frefstride));
-            T13 = (*(__m64*)(fref2 + (i + 3) * frefstride));
-
-            T20 = _mm_sad_pu8(T00, T10);
-            T21 = _mm_sad_pu8(T01, T11);
-            T22 = _mm_sad_pu8(T02, T12);
-            T23 = _mm_sad_pu8(T03, T13);
-
-            sum1 = _mm_add_pi16(sum1, T20);
-            sum1 = _mm_add_pi16(sum1, T21);
-            sum1 = _mm_add_pi16(sum1, T22);
-            sum1 = _mm_add_pi16(sum1, T23);
-
-            T10 = (*(__m64*)(fref3 + (i + 0) * frefstride));
-            T11 = (*(__m64*)(fref3 + (i + 1) * frefstride));
-            T12 = (*(__m64*)(fref3 + (i + 2) * frefstride));
-            T13 = (*(__m64*)(fref3 + (i + 3) * frefstride));
-
-            T20 = _mm_sad_pu8(T00, T10);
-            T21 = _mm_sad_pu8(T01, T11);
-            T22 = _mm_sad_pu8(T02, T12);
-            T23 = _mm_sad_pu8(T03, T13);
-
-            sum2 = _mm_add_pi16(sum2, T20);
-            sum2 = _mm_add_pi16(sum2, T21);
-            sum2 = _mm_add_pi16(sum2, T22);
-            sum2 = _mm_add_pi16(sum2, T23);
-        }
-    }
-
-    res[0] = _m_to_int(sum0);
-    res[1] = _m_to_int(sum1);
-    res[2] = _m_to_int(sum2);
-}
-
-#else /* if HAVE_MMX */
-
-template<int ly>
-void sad_x3_8(pixel *fenc, pixel *fref1, pixel *fref2, pixel *fref3, intptr_t frefstride, int *res)
-{
-    assert((ly % 4) == 0);
-    __m128i sum0 = _mm_setzero_si128();
-
-    __m128i T00, T01, T02, T03;
-    __m128i T10, T11, T12, T13;
-    __m128i T20, T21;
-
-    if (ly == 4)
-    {
-        T00 = _mm_loadl_epi64((__m128i*)(fenc + (0) * FENC_STRIDE));
-        T01 = _mm_loadl_epi64((__m128i*)(fenc + (1) * FENC_STRIDE));
-        T01 = _mm_unpacklo_epi64(T00, T01);
-        T02 = _mm_loadl_epi64((__m128i*)(fenc + (2) * FENC_STRIDE));
-        T03 = _mm_loadl_epi64((__m128i*)(fenc + (3) * FENC_STRIDE));
-        T03 = _mm_unpacklo_epi64(T02, T03);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref1 + (0) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref1 + (1) * frefstride));
-        T11 = _mm_unpacklo_epi64(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref1 + (2) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref1 + (3) * frefstride));
-        T13 = _mm_unpacklo_epi64(T12, T13);
-
-        T20 = _mm_sad_epu8(T01, T11);
-        T21 = _mm_sad_epu8(T03, T13);
-        T21 = _mm_add_epi32(T20, T21);
-        sum0 = _mm_shuffle_epi32(T21, 2);
-        sum0 = _mm_add_epi32(sum0, T21);
-        res[0] = _mm_cvtsi128_si32(sum0);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref2 + (0) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref2 + (1) * frefstride));
-        T11 = _mm_unpacklo_epi64(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref2 + (2) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref2 + (3) * frefstride));
-        T13 = _mm_unpacklo_epi64(T12, T13);
-
-        T20 = _mm_sad_epu8(T01, T11);
-        T21 = _mm_sad_epu8(T03, T13);
-        T21 = _mm_add_epi32(T20, T21);
-        sum0 = _mm_shuffle_epi32(T21, 2);
-        sum0 = _mm_add_epi32(sum0, T21);
-        res[1] = _mm_cvtsi128_si32(sum0);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref3 + (0) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref3 + (1) * frefstride));
-        T11 = _mm_unpacklo_epi64(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref3 + (2) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref3 + (3) * frefstride));
-        T13 = _mm_unpacklo_epi64(T12, T13);
-
-        T20 = _mm_sad_epu8(T01, T11);
-        T21 = _mm_sad_epu8(T03, T13);
-        T21 = _mm_add_epi32(T20, T21);
-        sum0 = _mm_shuffle_epi32(T21, 2);
-        sum0 = _mm_add_epi32(sum0, T21);
-        res[2] = _mm_cvtsi128_si32(sum0);
-    }
-    else if (ly == 8)
-    {
-        T00 = _mm_loadl_epi64((__m128i*)(fenc + (0) * FENC_STRIDE));
-        T01 = _mm_loadl_epi64((__m128i*)(fenc + (1) * FENC_STRIDE));
-        T01 = _mm_unpacklo_epi64(T00, T01);
-        T02 = _mm_loadl_epi64((__m128i*)(fenc + (2) * FENC_STRIDE));
-        T03 = _mm_loadl_epi64((__m128i*)(fenc + (3) * FENC_STRIDE));
-        T03 = _mm_unpacklo_epi64(T02, T03);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref1 + (0) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref1 + (1) * frefstride));
-        T11 = _mm_unpacklo_epi64(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref1 + (2) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref1 + (3) * frefstride));
-        T13 = _mm_unpacklo_epi64(T12, T13);
-
-        T20 = _mm_sad_epu8(T01, T11);
-        T21 = _mm_sad_epu8(T03, T13);
-        T21 = _mm_add_epi32(T20, T21);
-        sum0 = _mm_shuffle_epi32(T21, 2);
-        sum0 = _mm_add_epi32(sum0, T21);
-        res[0] = _mm_cvtsi128_si32(sum0);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref2 + (0) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref2 + (1) * frefstride));
-        T11 = _mm_unpacklo_epi64(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref2 + (2) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref2 + (3) * frefstride));
-        T13 = _mm_unpacklo_epi64(T12, T13);
-
-        T20 = _mm_sad_epu8(T01, T11);
-        T21 = _mm_sad_epu8(T03, T13);
-        T21 = _mm_add_epi32(T20, T21);
-        sum0 = _mm_shuffle_epi32(T21, 2);
-        sum0 = _mm_add_epi32(sum0, T21);
-        res[1] = _mm_cvtsi128_si32(sum0);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref3 + (0) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref3 + (1) * frefstride));
-        T11 = _mm_unpacklo_epi64(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref3 + (2) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref3 + (3) * frefstride));
-        T13 = _mm_unpacklo_epi64(T12, T13);
-
-        T20 = _mm_sad_epu8(T01, T11);
-        T21 = _mm_sad_epu8(T03, T13);
-        T21 = _mm_add_epi32(T20, T21);
-        sum0 = _mm_shuffle_epi32(T21, 2);
-        sum0 = _mm_add_epi32(sum0, T21);
-        res[2] = _mm_cvtsi128_si32(sum0);
-
-        T00 = _mm_loadl_epi64((__m128i*)(fenc + (4) * FENC_STRIDE));
-        T01 = _mm_loadl_epi64((__m128i*)(fenc + (5) * FENC_STRIDE));
-        T01 = _mm_unpacklo_epi64(T00, T01);
-        T02 = _mm_loadl_epi64((__m128i*)(fenc + (6) * FENC_STRIDE));
-        T03 = _mm_loadl_epi64((__m128i*)(fenc + (7) * FENC_STRIDE));
-        T03 = _mm_unpacklo_epi64(T02, T03);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref1 + (4) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref1 + (5) * frefstride));
-        T11 = _mm_unpacklo_epi64(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref1 + (6) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref1 + (7) * frefstride));
-        T13 = _mm_unpacklo_epi64(T12, T13);
-
-        T20 = _mm_sad_epu8(T01, T11);
-        T21 = _mm_sad_epu8(T03, T13);
-        T21 = _mm_add_epi32(T20, T21);
-        sum0 = _mm_shuffle_epi32(T21, 2);
-        sum0 = _mm_add_epi32(sum0, T21);
-        res[0] = res[0] + _mm_cvtsi128_si32(sum0);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref2 + (4) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref2 + (5) * frefstride));
-        T11 = _mm_unpacklo_epi64(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref2 + (6) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref2 + (7) * frefstride));
-        T13 = _mm_unpacklo_epi64(T12, T13);
-
-        T20 = _mm_sad_epu8(T01, T11);
-        T21 = _mm_sad_epu8(T03, T13);
-        T21 = _mm_add_epi32(T20, T21);
-        sum0 = _mm_shuffle_epi32(T21, 2);
-        sum0 = _mm_add_epi32(sum0, T21);
-        res[1] = res[1] + _mm_cvtsi128_si32(sum0);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref3 + (4) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref3 + (5) * frefstride));
-        T11 = _mm_unpacklo_epi64(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref3 + (6) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref3 + (7) * frefstride));
-        T13 = _mm_unpacklo_epi64(T12, T13);
-
-        T20 = _mm_sad_epu8(T01, T11);
-        T21 = _mm_sad_epu8(T03, T13);
-        T21 = _mm_add_epi32(T20, T21);
-        sum0 = _mm_shuffle_epi32(T21, 2);
-        sum0 = _mm_add_epi32(sum0, T21);
-        res[2] = res[2] + _mm_cvtsi128_si32(sum0);
-    }
-    else if (ly == 16)
-    {
-        T00 = _mm_loadl_epi64((__m128i*)(fenc + (0) * FENC_STRIDE));
-        T01 = _mm_loadl_epi64((__m128i*)(fenc + (1) * FENC_STRIDE));
-        T01 = _mm_unpacklo_epi64(T00, T01);
-        T02 = _mm_loadl_epi64((__m128i*)(fenc + (2) * FENC_STRIDE));
-        T03 = _mm_loadl_epi64((__m128i*)(fenc + (3) * FENC_STRIDE));
-        T03 = _mm_unpacklo_epi64(T02, T03);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref1 + (0) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref1 + (1) * frefstride));
-        T11 = _mm_unpacklo_epi64(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref1 + (2) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref1 + (3) * frefstride));
-        T13 = _mm_unpacklo_epi64(T12, T13);
-
-        T20 = _mm_sad_epu8(T01, T11);
-        T21 = _mm_sad_epu8(T03, T13);
-        T21 = _mm_add_epi32(T20, T21);
-        sum0 = _mm_shuffle_epi32(T21, 2);
-        sum0 = _mm_add_epi32(sum0, T21);
-        res[0] = _mm_cvtsi128_si32(sum0);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref2 + (0) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref2 + (1) * frefstride));
-        T11 = _mm_unpacklo_epi64(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref2 + (2) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref2 + (3) * frefstride));
-        T13 = _mm_unpacklo_epi64(T12, T13);
-
-        T20 = _mm_sad_epu8(T01, T11);
-        T21 = _mm_sad_epu8(T03, T13);
-        T21 = _mm_add_epi32(T20, T21);
-        sum0 = _mm_shuffle_epi32(T21, 2);
-        sum0 = _mm_add_epi32(sum0, T21);
-        res[1] = _mm_cvtsi128_si32(sum0);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref3 + (0) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref3 + (1) * frefstride));
-        T11 = _mm_unpacklo_epi64(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref3 + (2) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref3 + (3) * frefstride));
-        T13 = _mm_unpacklo_epi64(T12, T13);
-
-        T20 = _mm_sad_epu8(T01, T11);
-        T21 = _mm_sad_epu8(T03, T13);
-        T21 = _mm_add_epi32(T20, T21);
-        sum0 = _mm_shuffle_epi32(T21, 2);
-        sum0 = _mm_add_epi32(sum0, T21);
-        res[2] = _mm_cvtsi128_si32(sum0);
-
-        T00 = _mm_loadl_epi64((__m128i*)(fenc + (4) * FENC_STRIDE));
-        T01 = _mm_loadl_epi64((__m128i*)(fenc + (5) * FENC_STRIDE));
-        T01 = _mm_unpacklo_epi64(T00, T01);
-        T02 = _mm_loadl_epi64((__m128i*)(fenc + (6) * FENC_STRIDE));
-        T03 = _mm_loadl_epi64((__m128i*)(fenc + (7) * FENC_STRIDE));
-        T03 = _mm_unpacklo_epi64(T02, T03);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref1 + (4) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref1 + (5) * frefstride));
-        T11 = _mm_unpacklo_epi64(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref1 + (6) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref1 + (7) * frefstride));
-        T13 = _mm_unpacklo_epi64(T12, T13);
-
-        T20 = _mm_sad_epu8(T01, T11);
-        T21 = _mm_sad_epu8(T03, T13);
-        T21 = _mm_add_epi32(T20, T21);
-        sum0 = _mm_shuffle_epi32(T21, 2);
-        sum0 = _mm_add_epi32(sum0, T21);
-        res[0] = res[0] + _mm_cvtsi128_si32(sum0);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref2 + (4) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref2 + (5) * frefstride));
-        T11 = _mm_unpacklo_epi64(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref2 + (6) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref2 + (7) * frefstride));
-        T13 = _mm_unpacklo_epi64(T12, T13);
-
-        T20 = _mm_sad_epu8(T01, T11);
-        T21 = _mm_sad_epu8(T03, T13);
-        T21 = _mm_add_epi32(T20, T21);
-        sum0 = _mm_shuffle_epi32(T21, 2);
-        sum0 = _mm_add_epi32(sum0, T21);
-        res[1] = res[1] + _mm_cvtsi128_si32(sum0);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref3 + (4) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref3 + (5) * frefstride));
-        T11 = _mm_unpacklo_epi64(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref3 + (6) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref3 + (7) * frefstride));
-        T13 = _mm_unpacklo_epi64(T12, T13);
-
-        T20 = _mm_sad_epu8(T01, T11);
-        T21 = _mm_sad_epu8(T03, T13);
-        T21 = _mm_add_epi32(T20, T21);
-        sum0 = _mm_shuffle_epi32(T21, 2);
-        sum0 = _mm_add_epi32(sum0, T21);
-        res[2] = res[2] + _mm_cvtsi128_si32(sum0);
-
-        T00 = _mm_loadl_epi64((__m128i*)(fenc + (8) * FENC_STRIDE));
-        T01 = _mm_loadl_epi64((__m128i*)(fenc + (9) * FENC_STRIDE));
-        T01 = _mm_unpacklo_epi64(T00, T01);
-        T02 = _mm_loadl_epi64((__m128i*)(fenc + (10) * FENC_STRIDE));
-        T03 = _mm_loadl_epi64((__m128i*)(fenc + (11) * FENC_STRIDE));
-        T03 = _mm_unpacklo_epi64(T02, T03);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref1 + (8) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref1 + (9) * frefstride));
-        T11 = _mm_unpacklo_epi64(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref1 + (10) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref1 + (11) * frefstride));
-        T13 = _mm_unpacklo_epi64(T12, T13);
-
-        T20 = _mm_sad_epu8(T01, T11);
-        T21 = _mm_sad_epu8(T03, T13);
-        T21 = _mm_add_epi32(T20, T21);
-        sum0 = _mm_shuffle_epi32(T21, 2);
-        sum0 = _mm_add_epi32(sum0, T21);
-        res[0] = res[0] + _mm_cvtsi128_si32(sum0);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref2 + (8) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref2 + (9) * frefstride));
-        T11 = _mm_unpacklo_epi64(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref2 + (10) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref2 + (11) * frefstride));
-        T13 = _mm_unpacklo_epi64(T12, T13);
-
-        T20 = _mm_sad_epu8(T01, T11);
-        T21 = _mm_sad_epu8(T03, T13);
-        T21 = _mm_add_epi32(T20, T21);
-        sum0 = _mm_shuffle_epi32(T21, 2);
-        sum0 = _mm_add_epi32(sum0, T21);
-        res[1] = res[1] + _mm_cvtsi128_si32(sum0);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref3 + (8) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref3 + (9) * frefstride));
-        T11 = _mm_unpacklo_epi64(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref3 + (10) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref3 + (11) * frefstride));
-        T13 = _mm_unpacklo_epi64(T12, T13);
-
-        T20 = _mm_sad_epu8(T01, T11);
-        T21 = _mm_sad_epu8(T03, T13);
-        T21 = _mm_add_epi32(T20, T21);
-        sum0 = _mm_shuffle_epi32(T21, 2);
-        sum0 = _mm_add_epi32(sum0, T21);
-        res[2] = res[2] + _mm_cvtsi128_si32(sum0);
-
-        T00 = _mm_loadl_epi64((__m128i*)(fenc + (12) * FENC_STRIDE));
-        T01 = _mm_loadl_epi64((__m128i*)(fenc + (13) * FENC_STRIDE));
-        T01 = _mm_unpacklo_epi64(T00, T01);
-        T02 = _mm_loadl_epi64((__m128i*)(fenc + (14) * FENC_STRIDE));
-        T03 = _mm_loadl_epi64((__m128i*)(fenc + (15) * FENC_STRIDE));
-        T03 = _mm_unpacklo_epi64(T02, T03);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref1 + (12) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref1 + (13) * frefstride));
-        T11 = _mm_unpacklo_epi64(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref1 + (14) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref1 + (15) * frefstride));
-        T13 = _mm_unpacklo_epi64(T12, T13);
-
-        T20 = _mm_sad_epu8(T01, T11);
-        T21 = _mm_sad_epu8(T03, T13);
-        T21 = _mm_add_epi32(T20, T21);
-        sum0 = _mm_shuffle_epi32(T21, 2);
-        sum0 = _mm_add_epi32(sum0, T21);
-        res[0] = res[0] + _mm_cvtsi128_si32(sum0);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref2 + (12) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref2 + (13) * frefstride));
-        T11 = _mm_unpacklo_epi64(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref2 + (14) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref2 + (15) * frefstride));
-        T13 = _mm_unpacklo_epi64(T12, T13);
-
-        T20 = _mm_sad_epu8(T01, T11);
-        T21 = _mm_sad_epu8(T03, T13);
-        T21 = _mm_add_epi32(T20, T21);
-        sum0 = _mm_shuffle_epi32(T21, 2);
-        sum0 = _mm_add_epi32(sum0, T21);
-        res[1] = res[1] + _mm_cvtsi128_si32(sum0);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref3 + (12) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref3 + (13) * frefstride));
-        T11 = _mm_unpacklo_epi64(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref3 + (14) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref3 + (15) * frefstride));
-        T13 = _mm_unpacklo_epi64(T12, T13);
-
-        T20 = _mm_sad_epu8(T01, T11);
-        T21 = _mm_sad_epu8(T03, T13);
-        T21 = _mm_add_epi32(T20, T21);
-        sum0 = _mm_shuffle_epi32(T21, 2);
-        sum0 = _mm_add_epi32(sum0, T21);
-        res[2] = res[2] + _mm_cvtsi128_si32(sum0);
-    }
-    else if ((ly % 8) == 0)
-    {
-        res[0] = res[1] = res[2] = 0;
-        for (int i = 0; i < ly; i += 8)
-        {
-            T00 = _mm_loadl_epi64((__m128i*)(fenc + (i + 0) * FENC_STRIDE));
-            T01 = _mm_loadl_epi64((__m128i*)(fenc + (i + 1) * FENC_STRIDE));
-            T01 = _mm_unpacklo_epi64(T00, T01);
-            T02 = _mm_loadl_epi64((__m128i*)(fenc + (i + 2) * FENC_STRIDE));
-            T03 = _mm_loadl_epi64((__m128i*)(fenc + (i + 3) * FENC_STRIDE));
-            T03 = _mm_unpacklo_epi64(T02, T03);
-
-            T10 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 0) * frefstride));
-            T11 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 1) * frefstride));
-            T11 = _mm_unpacklo_epi64(T10, T11);
-            T12 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 2) * frefstride));
-            T13 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 3) * frefstride));
-            T13 = _mm_unpacklo_epi64(T12, T13);
-
-            T20 = _mm_sad_epu8(T01, T11);
-            T21 = _mm_sad_epu8(T03, T13);
-            T21 = _mm_add_epi32(T20, T21);
-            sum0 = _mm_shuffle_epi32(T21, 2);
-            sum0 = _mm_add_epi32(sum0, T21);
-            res[0] = res[0] + _mm_cvtsi128_si32(sum0);
-
-            T10 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 0) * frefstride));
-            T11 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 1) * frefstride));
-            T11 = _mm_unpacklo_epi64(T10, T11);
-            T12 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 2) * frefstride));
-            T13 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 3) * frefstride));
-            T13 = _mm_unpacklo_epi64(T12, T13);
-
-            T20 = _mm_sad_epu8(T01, T11);
-            T21 = _mm_sad_epu8(T03, T13);
-            T21 = _mm_add_epi32(T20, T21);
-            sum0 = _mm_shuffle_epi32(T21, 2);
-            sum0 = _mm_add_epi32(sum0, T21);
-            res[1] = res[1] + _mm_cvtsi128_si32(sum0);
-
-            T10 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 0) * frefstride));
-            T11 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 1) * frefstride));
-            T11 = _mm_unpacklo_epi64(T10, T11);
-            T12 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 2) * frefstride));
-            T13 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 3) * frefstride));
-            T13 = _mm_unpacklo_epi64(T12, T13);
-
-            T20 = _mm_sad_epu8(T01, T11);
-            T21 = _mm_sad_epu8(T03, T13);
-            T21 = _mm_add_epi32(T20, T21);
-            sum0 = _mm_shuffle_epi32(T21, 2);
-            sum0 = _mm_add_epi32(sum0, T21);
-            res[2] = res[2] + _mm_cvtsi128_si32(sum0);
-
-            T00 = _mm_loadl_epi64((__m128i*)(fenc + (i + 4) * FENC_STRIDE));
-            T01 = _mm_loadl_epi64((__m128i*)(fenc + (i + 5) * FENC_STRIDE));
-            T01 = _mm_unpacklo_epi64(T00, T01);
-            T02 = _mm_loadl_epi64((__m128i*)(fenc + (i + 6) * FENC_STRIDE));
-            T03 = _mm_loadl_epi64((__m128i*)(fenc + (i + 7) * FENC_STRIDE));
-            T03 = _mm_unpacklo_epi64(T02, T03);
-
-            T10 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 4) * frefstride));
-            T11 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 5) * frefstride));
-            T11 = _mm_unpacklo_epi64(T10, T11);
-            T12 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 6) * frefstride));
-            T13 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 7) * frefstride));
-            T13 = _mm_unpacklo_epi64(T12, T13);
-
-            T20 = _mm_sad_epu8(T01, T11);
-            T21 = _mm_sad_epu8(T03, T13);
-            T21 = _mm_add_epi32(T20, T21);
-            sum0 = _mm_shuffle_epi32(T21, 2);
-            sum0 = _mm_add_epi32(sum0, T21);
-            res[0] = res[0] + _mm_cvtsi128_si32(sum0);
-
-            T10 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 4) * frefstride));
-            T11 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 5) * frefstride));
-            T11 = _mm_unpacklo_epi64(T10, T11);
-            T12 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 6) * frefstride));
-            T13 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 7) * frefstride));
-            T13 = _mm_unpacklo_epi64(T12, T13);
-
-            T20 = _mm_sad_epu8(T01, T11);
-            T21 = _mm_sad_epu8(T03, T13);
-            T21 = _mm_add_epi32(T20, T21);
-            sum0 = _mm_shuffle_epi32(T21, 2);
-            sum0 = _mm_add_epi32(sum0, T21);
-            res[1] = res[1] + _mm_cvtsi128_si32(sum0);
-
-            T10 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 4) * frefstride));
-            T11 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 5) * frefstride));
-            T11 = _mm_unpacklo_epi64(T10, T11);
-            T12 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 6) * frefstride));
-            T13 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 7) * frefstride));
-            T13 = _mm_unpacklo_epi64(T12, T13);
-
-            T20 = _mm_sad_epu8(T01, T11);
-            T21 = _mm_sad_epu8(T03, T13);
-            T21 = _mm_add_epi32(T20, T21);
-            sum0 = _mm_shuffle_epi32(T21, 2);
-            sum0 = _mm_add_epi32(sum0, T21);
-            res[2] = res[2] + _mm_cvtsi128_si32(sum0);
-        }
-    }
-    else
-    {
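-        /* height is only a multiple of 4: same two-rows-per-register scheme,
-         * four rows per iteration */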
-        res[0] = res[1] = res[2] = 0;
-        for (int i = 0; i < ly; i += 4)
-        {
-            T00 = _mm_loadl_epi64((__m128i*)(fenc + (i + 0) * FENC_STRIDE));
-            T01 = _mm_loadl_epi64((__m128i*)(fenc + (i + 1) * FENC_STRIDE));
-            T01 = _mm_unpacklo_epi64(T00, T01);
-            T02 = _mm_loadl_epi64((__m128i*)(fenc + (i + 2) * FENC_STRIDE));
-            T03 = _mm_loadl_epi64((__m128i*)(fenc + (i + 3) * FENC_STRIDE));
-            T03 = _mm_unpacklo_epi64(T02, T03);
-
-            T10 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 0) * frefstride));
-            T11 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 1) * frefstride));
-            T11 = _mm_unpacklo_epi64(T10, T11);
-            T12 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 2) * frefstride));
-            T13 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 3) * frefstride));
-            T13 = _mm_unpacklo_epi64(T12, T13);
-
-            T20 = _mm_sad_epu8(T01, T11);
-            T21 = _mm_sad_epu8(T03, T13);
-            T21 = _mm_add_epi32(T20, T21);
-            sum0 = _mm_shuffle_epi32(T21, 2);
-            sum0 = _mm_add_epi32(sum0, T21);
-            res[0] = res[0] + _mm_cvtsi128_si32(sum0);
-
-            T10 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 0) * frefstride));
-            T11 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 1) * frefstride));
-            T11 = _mm_unpacklo_epi64(T10, T11);
-            T12 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 2) * frefstride));
-            T13 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 3) * frefstride));
-            T13 = _mm_unpacklo_epi64(T12, T13);
-
-            T20 = _mm_sad_epu8(T01, T11);
-            T21 = _mm_sad_epu8(T03, T13);
-            T21 = _mm_add_epi32(T20, T21);
-            sum0 = _mm_shuffle_epi32(T21, 2);
-            sum0 = _mm_add_epi32(sum0, T21);
-            res[1] = res[1] + _mm_cvtsi128_si32(sum0);
-
-            T10 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 0) * frefstride));
-            T11 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 1) * frefstride));
-            T11 = _mm_unpacklo_epi64(T10, T11);
-            T12 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 2) * frefstride));
-            T13 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 3) * frefstride));
-            T13 = _mm_unpacklo_epi64(T12, T13);
-
-            T20 = _mm_sad_epu8(T01, T11);
-            T21 = _mm_sad_epu8(T03, T13);
-            T21 = _mm_add_epi32(T20, T21);
-            sum0 = _mm_shuffle_epi32(T21, 2);
-            sum0 = _mm_add_epi32(sum0, T21);
-            res[2] = res[2] + _mm_cvtsi128_si32(sum0);
-        }
-    }
-}
-
-#endif /* if HAVE_MMX */
-#endif /* if INSTRSET >= X265_CPU_LEVEL_SSE41 */
-
-/* For performance, this function assumes that the *last load* can access 16 elements; each row reads a full 16-byte vector even though only 12 pixels are used. */
-template<int ly>
-void sad_x3_12(pixel *fenc, pixel *fref1, pixel *fref2, pixel *fref3, intptr_t frefstride, int *res)
-{
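-    /* 12-wide blocks: load 16 pixels per row and zero the top 4 of both the
-     * encoder and reference vectors with cutoff(12), so the extra pixels do
-     * not contribute to the SAD */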
-    Vec16uc m1, n1, n2, n3;
-
-    Vec4i sum1(0), sum2(0), sum3(0);
-    Vec8us sad1(0), sad2(0), sad3(0);
-    int max_iterators = (ly >> 4) << 4;
-    int row;
-
-    for (row = 0; row < max_iterators; row += 16)
-    {
-        for (int i = 0; i < 16; i++)
-        {
-            m1.load_a(fenc);
-            m1.cutoff(12);
-            n1.load(fref1);
-            n1.cutoff(12);
-            n2.load(fref2);
-            n2.cutoff(12);
-            n3.load(fref3);
-            n3.cutoff(12);
-
-            sad1.addSumAbsDiff(m1, n1);
-            sad2.addSumAbsDiff(m1, n2);
-            sad3.addSumAbsDiff(m1, n3);
-
-            fenc += FENC_STRIDE;
-            fref1 += frefstride;
-            fref2 += frefstride;
-            fref3 += frefstride;
-        }
-
-        sum1 += extend_low(sad1) + extend_high(sad1);
-        sum2 += extend_low(sad2) + extend_high(sad2);
-        sum3 += extend_low(sad3) + extend_high(sad3);
-        sad1 = 0;
-        sad2 = 0;
-        sad3 = 0;
-    }
-
-    while (row++ < ly)
-    {
-        m1.load_a(fenc);
-        m1.cutoff(12);
-        n1.load(fref1);
-        n1.cutoff(12);
-        n2.load(fref2);
-        n2.cutoff(12);
-        n3.load(fref3);
-        n3.cutoff(12);
-
-        sad1.addSumAbsDiff(m1, n1);
-        sad2.addSumAbsDiff(m1, n2);
-        sad3.addSumAbsDiff(m1, n3);
-
-        fenc += FENC_STRIDE;
-        fref1 += frefstride;
-        fref2 += frefstride;
-        fref3 += frefstride;
-    }
-
-    sum1 += extend_low(sad1) + extend_high(sad1);
-    sum2 += extend_low(sad2) + extend_high(sad2);
-    sum3 += extend_low(sad3) + extend_high(sad3);
-
-    res[0] = horizontal_add(sum1);
-    res[1] = horizontal_add(sum2);
-    res[2] = horizontal_add(sum3);
-}
-
-#if INSTRSET >= X265_CPU_LEVEL_SSE41
-template<int ly>
-void sad_x3_16(pixel *fenc, pixel *fref1, pixel *fref2, pixel *fref3, intptr_t frefstride, int *res)
-{
-    assert((ly % 4) == 0);
-
-    __m128i sum0, sum1;
-
-    __m128i T00, T01, T02, T03;
-    __m128i T10, T11, T12, T13;
-    __m128i T20, T21, T22, T23;
-
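-    /* per row: aligned 16-byte load from fenc, unaligned load from each
-     * reference; psadbw leaves two 16-bit partial sums, one per 64-bit half */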
-    if (ly == 4)
-    {
-        T00 = _mm_load_si128((__m128i*)(fenc + (0) * FENC_STRIDE));
-        T01 = _mm_load_si128((__m128i*)(fenc + (1) * FENC_STRIDE));
-        T02 = _mm_load_si128((__m128i*)(fenc + (2) * FENC_STRIDE));
-        T03 = _mm_load_si128((__m128i*)(fenc + (3) * FENC_STRIDE));
-
-        T10 = _mm_loadu_si128((__m128i*)(fref1 + (0) * frefstride));
-        T11 = _mm_loadu_si128((__m128i*)(fref1 + (1) * frefstride));
-        T12 = _mm_loadu_si128((__m128i*)(fref1 + (2) * frefstride));
-        T13 = _mm_loadu_si128((__m128i*)(fref1 + (3) * frefstride));
-
-        T20 = _mm_sad_epu8(T00, T10);
-        T21 = _mm_sad_epu8(T01, T11);
-        T22 = _mm_sad_epu8(T02, T12);
-        T23 = _mm_sad_epu8(T03, T13);
-
-        T20 = _mm_add_epi16(T20, T21);
-        T22 = _mm_add_epi16(T22, T23);
-        sum0 = _mm_add_epi16(T20, T22);
-
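-        /* bring the upper-half partial sum down and add it to the lower one;
-         * the low 32 bits then hold the SAD of the 16x4 block */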
-        sum1 = _mm_shuffle_epi32(sum0, 2);
-        sum0 = _mm_add_epi32(sum0, sum1);
-        res[0] = _mm_cvtsi128_si32(sum0);
-
-        T10 = _mm_loadu_si128((__m128i*)(fref2 + (0) * frefstride));
-        T11 = _mm_loadu_si128((__m128i*)(fref2 + (1) * frefstride));
-        T12 = _mm_loadu_si128((__m128i*)(fref2 + (2) * frefstride));
-        T13 = _mm_loadu_si128((__m128i*)(fref2 + (3) * frefstride));
-
-        T20 = _mm_sad_epu8(T00, T10);
-        T21 = _mm_sad_epu8(T01, T11);
-        T22 = _mm_sad_epu8(T02, T12);
-        T23 = _mm_sad_epu8(T03, T13);
-
-        T20 = _mm_add_epi16(T20, T21);
-        T22 = _mm_add_epi16(T22, T23);
-        sum0 = _mm_add_epi16(T20, T22);
-
-        sum1 = _mm_shuffle_epi32(sum0, 2);
-        sum0 = _mm_add_epi32(sum0, sum1);
-        res[1] = _mm_cvtsi128_si32(sum0);
-
-        T10 = _mm_loadu_si128((__m128i*)(fref3 + (0) * frefstride));
-        T11 = _mm_loadu_si128((__m128i*)(fref3 + (1) * frefstride));
-        T12 = _mm_loadu_si128((__m128i*)(fref3 + (2) * frefstride));
-        T13 = _mm_loadu_si128((__m128i*)(fref3 + (3) * frefstride));
-
-        T20 = _mm_sad_epu8(T00, T10);
-        T21 = _mm_sad_epu8(T01, T11);
-        T22 = _mm_sad_epu8(T02, T12);
-        T23 = _mm_sad_epu8(T03, T13);
-
-        T20 = _mm_add_epi16(T20, T21);
-        T22 = _mm_add_epi16(T22, T23);
-        sum0 = _mm_add_epi16(T20, T22);
-
-        sum1 = _mm_shuffle_epi32(sum0, 2);
-        sum0 = _mm_add_epi32(sum0, sum1);
-        res[2] = _mm_cvtsi128_si32(sum0);
-    }
-    else if (ly == 8)
-    {
-        T00 = _mm_load_si128((__m128i*)(fenc + (0) * FENC_STRIDE));
-        T01 = _mm_load_si128((__m128i*)(fenc + (1) * FENC_STRIDE));
-        T02 = _mm_load_si128((__m128i*)(fenc + (2) * FENC_STRIDE));
-        T03 = _mm_load_si128((__m128i*)(fenc + (3) * FENC_STRIDE));
-
-        T10 = _mm_loadu_si128((__m128i*)(fref1 + (0) * frefstride));
-        T11 = _mm_loadu_si128((__m128i*)(fref1 + (1) * frefstride));
-        T12 = _mm_loadu_si128((__m128i*)(fref1 + (2) * frefstride));
-        T13 = _mm_loadu_si128((__m128i*)(fref1 + (3) * frefstride));
-
-        T20 = _mm_sad_epu8(T00, T10);
-        T21 = _mm_sad_epu8(T01, T11);
-        T22 = _mm_sad_epu8(T02, T12);
-        T23 = _mm_sad_epu8(T03, T13);
-
-        T20 = _mm_add_epi16(T20, T21);
-        T22 = _mm_add_epi16(T22, T23);
-        sum0 = _mm_add_epi16(T20, T22);
-
-        sum1 = _mm_shuffle_epi32(sum0, 2);
-        sum0 = _mm_add_epi32(sum0, sum1);
-        res[0] = _mm_cvtsi128_si32(sum0);
-
-        T10 = _mm_loadu_si128((__m128i*)(fref2 + (0) * frefstride));
-        T11 = _mm_loadu_si128((__m128i*)(fref2 + (1) * frefstride));
-        T12 = _mm_loadu_si128((__m128i*)(fref2 + (2) * frefstride));
-        T13 = _mm_loadu_si128((__m128i*)(fref2 + (3) * frefstride));
-
-        T20 = _mm_sad_epu8(T00, T10);
-        T21 = _mm_sad_epu8(T01, T11);
-        T22 = _mm_sad_epu8(T02, T12);
-        T23 = _mm_sad_epu8(T03, T13);
-
-        T20 = _mm_add_epi16(T20, T21);
-        T22 = _mm_add_epi16(T22, T23);
-        sum0 = _mm_add_epi16(T20, T22);
-
-        sum1 = _mm_shuffle_epi32(sum0, 2);
-        sum0 = _mm_add_epi32(sum0, sum1);
-        res[1] = _mm_cvtsi128_si32(sum0);
-
-        T10 = _mm_loadu_si128((__m128i*)(fref3 + (0) * frefstride));
-        T11 = _mm_loadu_si128((__m128i*)(fref3 + (1) * frefstride));
-        T12 = _mm_loadu_si128((__m128i*)(fref3 + (2) * frefstride));
-        T13 = _mm_loadu_si128((__m128i*)(fref3 + (3) * frefstride));
-
-        T20 = _mm_sad_epu8(T00, T10);
-        T21 = _mm_sad_epu8(T01, T11);
-        T22 = _mm_sad_epu8(T02, T12);
-        T23 = _mm_sad_epu8(T03, T13);
-
-        T20 = _mm_add_epi16(T20, T21);
-        T22 = _mm_add_epi16(T22, T23);
-        sum0 = _mm_add_epi16(T20, T22);
-
-        sum1 = _mm_shuffle_epi32(sum0, 2);
-        sum0 = _mm_add_epi32(sum0, sum1);
-        res[2] = _mm_cvtsi128_si32(sum0);
-
-        T00 = _mm_load_si128((__m128i*)(fenc + (4) * FENC_STRIDE));
-        T01 = _mm_load_si128((__m128i*)(fenc + (5) * FENC_STRIDE));
-        T02 = _mm_load_si128((__m128i*)(fenc + (6) * FENC_STRIDE));
-        T03 = _mm_load_si128((__m128i*)(fenc + (7) * FENC_STRIDE));
-
-        T10 = _mm_loadu_si128((__m128i*)(fref1 + (4) * frefstride));
-        T11 = _mm_loadu_si128((__m128i*)(fref1 + (5) * frefstride));
-        T12 = _mm_loadu_si128((__m128i*)(fref1 + (6) * frefstride));
-        T13 = _mm_loadu_si128((__m128i*)(fref1 + (7) * frefstride));
-
-        T20 = _mm_sad_epu8(T00, T10);
-        T21 = _mm_sad_epu8(T01, T11);
-        T22 = _mm_sad_epu8(T02, T12);
-        T23 = _mm_sad_epu8(T03, T13);
-
-        T20 = _mm_add_epi16(T20, T21);
-        T22 = _mm_add_epi16(T22, T23);
-        sum0 = _mm_add_epi16(T20, T22);
-
-        sum1 = _mm_shuffle_epi32(sum0, 2);
-        sum0 = _mm_add_epi32(sum0, sum1);
-        res[0] = res[0] + _mm_cvtsi128_si32(sum0);
-
-        T10 = _mm_loadu_si128((__m128i*)(fref2 + (4) * frefstride));
-        T11 = _mm_loadu_si128((__m128i*)(fref2 + (5) * frefstride));
-        T12 = _mm_loadu_si128((__m128i*)(fref2 + (6) * frefstride));
-        T13 = _mm_loadu_si128((__m128i*)(fref2 + (7) * frefstride));
-
-        T20 = _mm_sad_epu8(T00, T10);
-        T21 = _mm_sad_epu8(T01, T11);
-        T22 = _mm_sad_epu8(T02, T12);
-        T23 = _mm_sad_epu8(T03, T13);
-
-        T20 = _mm_add_epi16(T20, T21);
-        T22 = _mm_add_epi16(T22, T23);
-        sum0 = _mm_add_epi16(T20, T22);
-
-        sum1 = _mm_shuffle_epi32(sum0, 2);
-        sum0 = _mm_add_epi32(sum0, sum1);
-        res[1] = res[1] + _mm_cvtsi128_si32(sum0);
-
-        T10 = _mm_loadu_si128((__m128i*)(fref3 + (4) * frefstride));
-        T11 = _mm_loadu_si128((__m128i*)(fref3 + (5) * frefstride));
-        T12 = _mm_loadu_si128((__m128i*)(fref3 + (6) * frefstride));
-        T13 = _mm_loadu_si128((__m128i*)(fref3 + (7) * frefstride));
-
-        T20 = _mm_sad_epu8(T00, T10);
-        T21 = _mm_sad_epu8(T01, T11);
-        T22 = _mm_sad_epu8(T02, T12);
-        T23 = _mm_sad_epu8(T03, T13);
-
-        T20 = _mm_add_epi16(T20, T21);
-        T22 = _mm_add_epi16(T22, T23);
-        sum0 = _mm_add_epi16(T20, T22);
-
-        sum1 = _mm_shuffle_epi32(sum0, 2);
-        sum0 = _mm_add_epi32(sum0, sum1);
-        res[2] = res[2] + _mm_cvtsi128_si32(sum0);
-    }
-    else if (ly == 16)
-    {
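-        /* fully unrolled 16-row case: four 4-row passes per reference,
-         * accumulating into res[] */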
-        T00 = _mm_load_si128((__m128i*)(fenc + (0) * FENC_STRIDE));
-        T01 = _mm_load_si128((__m128i*)(fenc + (1) * FENC_STRIDE));
-        T02 = _mm_load_si128((__m128i*)(fenc + (2) * FENC_STRIDE));
-        T03 = _mm_load_si128((__m128i*)(fenc + (3) * FENC_STRIDE));
-
-        T10 = _mm_loadu_si128((__m128i*)(fref1 + (0) * frefstride));
-        T11 = _mm_loadu_si128((__m128i*)(fref1 + (1) * frefstride));
-        T12 = _mm_loadu_si128((__m128i*)(fref1 + (2) * frefstride));
-        T13 = _mm_loadu_si128((__m128i*)(fref1 + (3) * frefstride));
-
-        T20 = _mm_sad_epu8(T00, T10);
-        T21 = _mm_sad_epu8(T01, T11);
-        T22 = _mm_sad_epu8(T02, T12);
-        T23 = _mm_sad_epu8(T03, T13);
-
-        T20 = _mm_add_epi16(T20, T21);
-        T22 = _mm_add_epi16(T22, T23);
-        sum0 = _mm_add_epi16(T20, T22);
-
-        sum1 = _mm_shuffle_epi32(sum0, 2);
-        sum0 = _mm_add_epi32(sum0, sum1);
-        res[0] = _mm_cvtsi128_si32(sum0);
-
-        T10 = _mm_loadu_si128((__m128i*)(fref2 + (0) * frefstride));
-        T11 = _mm_loadu_si128((__m128i*)(fref2 + (1) * frefstride));
-        T12 = _mm_loadu_si128((__m128i*)(fref2 + (2) * frefstride));
-        T13 = _mm_loadu_si128((__m128i*)(fref2 + (3) * frefstride));
-
-        T20 = _mm_sad_epu8(T00, T10);
-        T21 = _mm_sad_epu8(T01, T11);
-        T22 = _mm_sad_epu8(T02, T12);
-        T23 = _mm_sad_epu8(T03, T13);
-
-        T20 = _mm_add_epi16(T20, T21);
-        T22 = _mm_add_epi16(T22, T23);
-        sum0 = _mm_add_epi16(T20, T22);
-
-        sum1 = _mm_shuffle_epi32(sum0, 2);
-        sum0 = _mm_add_epi32(sum0, sum1);
-        res[1] = _mm_cvtsi128_si32(sum0);
-
-        T10 = _mm_loadu_si128((__m128i*)(fref3 + (0) * frefstride));
-        T11 = _mm_loadu_si128((__m128i*)(fref3 + (1) * frefstride));
-        T12 = _mm_loadu_si128((__m128i*)(fref3 + (2) * frefstride));
-        T13 = _mm_loadu_si128((__m128i*)(fref3 + (3) * frefstride));
-
-        T20 = _mm_sad_epu8(T00, T10);
-        T21 = _mm_sad_epu8(T01, T11);
-        T22 = _mm_sad_epu8(T02, T12);
-        T23 = _mm_sad_epu8(T03, T13);
-
-        T20 = _mm_add_epi16(T20, T21);
-        T22 = _mm_add_epi16(T22, T23);
-        sum0 = _mm_add_epi16(T20, T22);
-
-        sum1 = _mm_shuffle_epi32(sum0, 2);
-        sum0 = _mm_add_epi32(sum0, sum1);
-        res[2] = _mm_cvtsi128_si32(sum0);
-
-        T00 = _mm_load_si128((__m128i*)(fenc + (4) * FENC_STRIDE));
-        T01 = _mm_load_si128((__m128i*)(fenc + (5) * FENC_STRIDE));
-        T02 = _mm_load_si128((__m128i*)(fenc + (6) * FENC_STRIDE));
-        T03 = _mm_load_si128((__m128i*)(fenc + (7) * FENC_STRIDE));
-
-        T10 = _mm_loadu_si128((__m128i*)(fref1 + (4) * frefstride));
-        T11 = _mm_loadu_si128((__m128i*)(fref1 + (5) * frefstride));
-        T12 = _mm_loadu_si128((__m128i*)(fref1 + (6) * frefstride));
-        T13 = _mm_loadu_si128((__m128i*)(fref1 + (7) * frefstride));
-
-        T20 = _mm_sad_epu8(T00, T10);
-        T21 = _mm_sad_epu8(T01, T11);
-        T22 = _mm_sad_epu8(T02, T12);
-        T23 = _mm_sad_epu8(T03, T13);
-
-        T20 = _mm_add_epi16(T20, T21);
-        T22 = _mm_add_epi16(T22, T23);
-        sum0 = _mm_add_epi16(T20, T22);
-
-        sum1 = _mm_shuffle_epi32(sum0, 2);
-        sum0 = _mm_add_epi32(sum0, sum1);
-        res[0] = res[0] + _mm_cvtsi128_si32(sum0);
-
-        T10 = _mm_loadu_si128((__m128i*)(fref2 + (4) * frefstride));
-        T11 = _mm_loadu_si128((__m128i*)(fref2 + (5) * frefstride));
-        T12 = _mm_loadu_si128((__m128i*)(fref2 + (6) * frefstride));
-        T13 = _mm_loadu_si128((__m128i*)(fref2 + (7) * frefstride));
-
-        T20 = _mm_sad_epu8(T00, T10);
-        T21 = _mm_sad_epu8(T01, T11);
-        T22 = _mm_sad_epu8(T02, T12);
-        T23 = _mm_sad_epu8(T03, T13);
-
-        T20 = _mm_add_epi16(T20, T21);
-        T22 = _mm_add_epi16(T22, T23);
-        sum0 = _mm_add_epi16(T20, T22);
-
-        sum1 = _mm_shuffle_epi32(sum0, 2);
-        sum0 = _mm_add_epi32(sum0, sum1);
-        res[1] = res[1] + _mm_cvtsi128_si32(sum0);
-
-        T10 = _mm_loadu_si128((__m128i*)(fref3 + (4) * frefstride));
-        T11 = _mm_loadu_si128((__m128i*)(fref3 + (5) * frefstride));
-        T12 = _mm_loadu_si128((__m128i*)(fref3 + (6) * frefstride));
-        T13 = _mm_loadu_si128((__m128i*)(fref3 + (7) * frefstride));
-
-        T20 = _mm_sad_epu8(T00, T10);
-        T21 = _mm_sad_epu8(T01, T11);
-        T22 = _mm_sad_epu8(T02, T12);
-        T23 = _mm_sad_epu8(T03, T13);
-
-        T20 = _mm_add_epi16(T20, T21);
-        T22 = _mm_add_epi16(T22, T23);
-        sum0 = _mm_add_epi16(T20, T22);
-
-        sum1 = _mm_shuffle_epi32(sum0, 2);
-        sum0 = _mm_add_epi32(sum0, sum1);
-        res[2] = res[2] + _mm_cvtsi128_si32(sum0);
-
-        T00 = _mm_load_si128((__m128i*)(fenc + (8) * FENC_STRIDE));
-        T01 = _mm_load_si128((__m128i*)(fenc + (9) * FENC_STRIDE));
-        T02 = _mm_load_si128((__m128i*)(fenc + (10) * FENC_STRIDE));
-        T03 = _mm_load_si128((__m128i*)(fenc + (11) * FENC_STRIDE));
-
-        T10 = _mm_loadu_si128((__m128i*)(fref1 + (8) * frefstride));
-        T11 = _mm_loadu_si128((__m128i*)(fref1 + (9) * frefstride));
-        T12 = _mm_loadu_si128((__m128i*)(fref1 + (10) * frefstride));
-        T13 = _mm_loadu_si128((__m128i*)(fref1 + (11) * frefstride));
-
-        T20 = _mm_sad_epu8(T00, T10);
-        T21 = _mm_sad_epu8(T01, T11);
-        T22 = _mm_sad_epu8(T02, T12);
-        T23 = _mm_sad_epu8(T03, T13);
-
-        T20 = _mm_add_epi16(T20, T21);
-        T22 = _mm_add_epi16(T22, T23);
-        sum0 = _mm_add_epi16(T20, T22);
-
-        sum1 = _mm_shuffle_epi32(sum0, 2);
-        sum0 = _mm_add_epi32(sum0, sum1);
-        res[0] = res[0] + _mm_cvtsi128_si32(sum0);
-
-        T10 = _mm_loadu_si128((__m128i*)(fref2 + (8) * frefstride));
-        T11 = _mm_loadu_si128((__m128i*)(fref2 + (9) * frefstride));
-        T12 = _mm_loadu_si128((__m128i*)(fref2 + (10) * frefstride));
-        T13 = _mm_loadu_si128((__m128i*)(fref2 + (11) * frefstride));
-
-        T20 = _mm_sad_epu8(T00, T10);
-        T21 = _mm_sad_epu8(T01, T11);
-        T22 = _mm_sad_epu8(T02, T12);
-        T23 = _mm_sad_epu8(T03, T13);
-
-        T20 = _mm_add_epi16(T20, T21);
-        T22 = _mm_add_epi16(T22, T23);
-        sum0 = _mm_add_epi16(T20, T22);
-
-        sum1 = _mm_shuffle_epi32(sum0, 2);
-        sum0 = _mm_add_epi32(sum0, sum1);
-        res[1] = res[1] + _mm_cvtsi128_si32(sum0);
-
-        T10 = _mm_loadu_si128((__m128i*)(fref3 + (8) * frefstride));
-        T11 = _mm_loadu_si128((__m128i*)(fref3 + (9) * frefstride));
-        T12 = _mm_loadu_si128((__m128i*)(fref3 + (10) * frefstride));
-        T13 = _mm_loadu_si128((__m128i*)(fref3 + (11) * frefstride));
-
-        T20 = _mm_sad_epu8(T00, T10);
-        T21 = _mm_sad_epu8(T01, T11);
-        T22 = _mm_sad_epu8(T02, T12);
-        T23 = _mm_sad_epu8(T03, T13);
-
-        T20 = _mm_add_epi16(T20, T21);
-        T22 = _mm_add_epi16(T22, T23);
-        sum0 = _mm_add_epi16(T20, T22);
-
-        sum1 = _mm_shuffle_epi32(sum0, 2);
-        sum0 = _mm_add_epi32(sum0, sum1);
-        res[2] = res[2] + _mm_cvtsi128_si32(sum0);
-
-        T00 = _mm_load_si128((__m128i*)(fenc + (12) * FENC_STRIDE));
-        T01 = _mm_load_si128((__m128i*)(fenc + (13) * FENC_STRIDE));
-        T02 = _mm_load_si128((__m128i*)(fenc + (14) * FENC_STRIDE));
-        T03 = _mm_load_si128((__m128i*)(fenc + (15) * FENC_STRIDE));
-
-        T10 = _mm_loadu_si128((__m128i*)(fref1 + (12) * frefstride));
-        T11 = _mm_loadu_si128((__m128i*)(fref1 + (13) * frefstride));
-        T12 = _mm_loadu_si128((__m128i*)(fref1 + (14) * frefstride));
-        T13 = _mm_loadu_si128((__m128i*)(fref1 + (15) * frefstride));
-
-        T20 = _mm_sad_epu8(T00, T10);
-        T21 = _mm_sad_epu8(T01, T11);
-        T22 = _mm_sad_epu8(T02, T12);
-        T23 = _mm_sad_epu8(T03, T13);
-
-        T20 = _mm_add_epi16(T20, T21);
-        T22 = _mm_add_epi16(T22, T23);
-        sum0 = _mm_add_epi16(T20, T22);
-
-        sum1 = _mm_shuffle_epi32(sum0, 2);
-        sum0 = _mm_add_epi32(sum0, sum1);
-        res[0] = res[0] + _mm_cvtsi128_si32(sum0);
-
-        T10 = _mm_loadu_si128((__m128i*)(fref2 + (12) * frefstride));
-        T11 = _mm_loadu_si128((__m128i*)(fref2 + (13) * frefstride));
-        T12 = _mm_loadu_si128((__m128i*)(fref2 + (14) * frefstride));
-        T13 = _mm_loadu_si128((__m128i*)(fref2 + (15) * frefstride));
-
-        T20 = _mm_sad_epu8(T00, T10);
-        T21 = _mm_sad_epu8(T01, T11);
-        T22 = _mm_sad_epu8(T02, T12);
-        T23 = _mm_sad_epu8(T03, T13);
-
-        T20 = _mm_add_epi16(T20, T21);
-        T22 = _mm_add_epi16(T22, T23);
-        sum0 = _mm_add_epi16(T20, T22);
-
-        sum1 = _mm_shuffle_epi32(sum0, 2);
-        sum0 = _mm_add_epi32(sum0, sum1);
-        res[1] = res[1] + _mm_cvtsi128_si32(sum0);
-
-        T10 = _mm_loadu_si128((__m128i*)(fref3 + (12) * frefstride));
-        T11 = _mm_loadu_si128((__m128i*)(fref3 + (13) * frefstride));
-        T12 = _mm_loadu_si128((__m128i*)(fref3 + (14) * frefstride));
-        T13 = _mm_loadu_si128((__m128i*)(fref3 + (15) * frefstride));
-
-        T20 = _mm_sad_epu8(T00, T10);
-        T21 = _mm_sad_epu8(T01, T11);
-        T22 = _mm_sad_epu8(T02, T12);
-        T23 = _mm_sad_epu8(T03, T13);
-
-        T20 = _mm_add_epi16(T20, T21);
-        T22 = _mm_add_epi16(T22, T23);
-        sum0 = _mm_add_epi16(T20, T22);
-
-        sum1 = _mm_shuffle_epi32(sum0, 2);
-        sum0 = _mm_add_epi32(sum0, sum1);
-        res[2] = res[2] + _mm_cvtsi128_si32(sum0);
-    }
-    else if ((ly % 8) == 0)
-    {
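-        /* looped fallback for other heights that are multiples of 8 */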
-        res[0] = res[1] = res[2] = 0;
-        for (int i = 0; i < ly; i += 8)
-        {
-            T00 = _mm_load_si128((__m128i*)(fenc + (i + 0) * FENC_STRIDE));
-            T01 = _mm_load_si128((__m128i*)(fenc + (i + 1) * FENC_STRIDE));
-            T02 = _mm_load_si128((__m128i*)(fenc + (i + 2) * FENC_STRIDE));
-            T03 = _mm_load_si128((__m128i*)(fenc + (i + 3) * FENC_STRIDE));
-
-            T10 = _mm_loadu_si128((__m128i*)(fref1 + (i + 0) * frefstride));
-            T11 = _mm_loadu_si128((__m128i*)(fref1 + (i + 1) * frefstride));
-            T12 = _mm_loadu_si128((__m128i*)(fref1 + (i + 2) * frefstride));
-            T13 = _mm_loadu_si128((__m128i*)(fref1 + (i + 3) * frefstride));
-
-            T20 = _mm_sad_epu8(T00, T10);
-            T21 = _mm_sad_epu8(T01, T11);
-            T22 = _mm_sad_epu8(T02, T12);
-            T23 = _mm_sad_epu8(T03, T13);
-
-            T20 = _mm_add_epi16(T20, T21);
-            T22 = _mm_add_epi16(T22, T23);
-            sum0 = _mm_add_epi16(T20, T22);
-
-            sum1 = _mm_shuffle_epi32(sum0, 2);
-            sum0 = _mm_add_epi32(sum0, sum1);
-            res[0] = res[0] + _mm_cvtsi128_si32(sum0);
-
-            T10 = _mm_loadu_si128((__m128i*)(fref2 + (i + 0) * frefstride));
-            T11 = _mm_loadu_si128((__m128i*)(fref2 + (i + 1) * frefstride));
-            T12 = _mm_loadu_si128((__m128i*)(fref2 + (i + 2) * frefstride));
-            T13 = _mm_loadu_si128((__m128i*)(fref2 + (i + 3) * frefstride));
-
-            T20 = _mm_sad_epu8(T00, T10);
-            T21 = _mm_sad_epu8(T01, T11);
-            T22 = _mm_sad_epu8(T02, T12);
-            T23 = _mm_sad_epu8(T03, T13);
-
-            T20 = _mm_add_epi16(T20, T21);
-            T22 = _mm_add_epi16(T22, T23);
-            sum0 = _mm_add_epi16(T20, T22);
-
-            sum1 = _mm_shuffle_epi32(sum0, 2);
-            sum0 = _mm_add_epi32(sum0, sum1);
-            res[1] = res[1] + _mm_cvtsi128_si32(sum0);
-
-            T10 = _mm_loadu_si128((__m128i*)(fref3 + (i + 0) * frefstride));
-            T11 = _mm_loadu_si128((__m128i*)(fref3 + (i + 1) * frefstride));
-            T12 = _mm_loadu_si128((__m128i*)(fref3 + (i + 2) * frefstride));
-            T13 = _mm_loadu_si128((__m128i*)(fref3 + (i + 3) * frefstride));
-
-            T20 = _mm_sad_epu8(T00, T10);
-            T21 = _mm_sad_epu8(T01, T11);
-            T22 = _mm_sad_epu8(T02, T12);
-            T23 = _mm_sad_epu8(T03, T13);
-
-            T20 = _mm_add_epi16(T20, T21);
-            T22 = _mm_add_epi16(T22, T23);
-            sum0 = _mm_add_epi16(T20, T22);
-
-            sum1 = _mm_shuffle_epi32(sum0, 2);
-            sum0 = _mm_add_epi32(sum0, sum1);
-            res[2] = res[2] + _mm_cvtsi128_si32(sum0);
-
-            T00 = _mm_load_si128((__m128i*)(fenc + (i + 4) * FENC_STRIDE));
-            T01 = _mm_load_si128((__m128i*)(fenc + (i + 5) * FENC_STRIDE));
-            T02 = _mm_load_si128((__m128i*)(fenc + (i + 6) * FENC_STRIDE));
-            T03 = _mm_load_si128((__m128i*)(fenc + (i + 7) * FENC_STRIDE));
-
-            T10 = _mm_loadu_si128((__m128i*)(fref1 + (i + 4) * frefstride));
-            T11 = _mm_loadu_si128((__m128i*)(fref1 + (i + 5) * frefstride));
-            T12 = _mm_loadu_si128((__m128i*)(fref1 + (i + 6) * frefstride));
-            T13 = _mm_loadu_si128((__m128i*)(fref1 + (i + 7) * frefstride));
-
-            T20 = _mm_sad_epu8(T00, T10);
-            T21 = _mm_sad_epu8(T01, T11);
-            T22 = _mm_sad_epu8(T02, T12);
-            T23 = _mm_sad_epu8(T03, T13);
-
-            T20 = _mm_add_epi16(T20, T21);
-            T22 = _mm_add_epi16(T22, T23);
-            sum0 = _mm_add_epi16(T20, T22);
-
-            sum1 = _mm_shuffle_epi32(sum0, 2);
-            sum0 = _mm_add_epi32(sum0, sum1);
-            res[0] = res[0] + _mm_cvtsi128_si32(sum0);
-
-            T10 = _mm_loadu_si128((__m128i*)(fref2 + (i + 4) * frefstride));
-            T11 = _mm_loadu_si128((__m128i*)(fref2 + (i + 5) * frefstride));
-            T12 = _mm_loadu_si128((__m128i*)(fref2 + (i + 6) * frefstride));
-            T13 = _mm_loadu_si128((__m128i*)(fref2 + (i + 7) * frefstride));
-
-            T20 = _mm_sad_epu8(T00, T10);
-            T21 = _mm_sad_epu8(T01, T11);
-            T22 = _mm_sad_epu8(T02, T12);
-            T23 = _mm_sad_epu8(T03, T13);
-
-            T20 = _mm_add_epi16(T20, T21);
-            T22 = _mm_add_epi16(T22, T23);
-            sum0 = _mm_add_epi16(T20, T22);
-
-            sum1 = _mm_shuffle_epi32(sum0, 2);
-            sum0 = _mm_add_epi32(sum0, sum1);
-            res[1] = res[1] + _mm_cvtsi128_si32(sum0);
-
-            T10 = _mm_loadu_si128((__m128i*)(fref3 + (i + 4) * frefstride));
-            T11 = _mm_loadu_si128((__m128i*)(fref3 + (i + 5) * frefstride));
-            T12 = _mm_loadu_si128((__m128i*)(fref3 + (i + 6) * frefstride));
-            T13 = _mm_loadu_si128((__m128i*)(fref3 + (i + 7) * frefstride));
-
-            T20 = _mm_sad_epu8(T00, T10);
-            T21 = _mm_sad_epu8(T01, T11);
-            T22 = _mm_sad_epu8(T02, T12);
-            T23 = _mm_sad_epu8(T03, T13);
-
-            T20 = _mm_add_epi16(T20, T21);
-            T22 = _mm_add_epi16(T22, T23);
-            sum0 = _mm_add_epi16(T20, T22);
-
-            sum1 = _mm_shuffle_epi32(sum0, 2);
-            sum0 = _mm_add_epi32(sum0, sum1);
-            res[2] = res[2] + _mm_cvtsi128_si32(sum0);
-        }
-    }
-    else
-    {
-        res[0] = res[1] = res[2] = 0;
-        for (int i = 0; i < ly; i += 4)
-        {
-            T00 = _mm_load_si128((__m128i*)(fenc + (i + 0) * FENC_STRIDE));
-            T01 = _mm_load_si128((__m128i*)(fenc + (i + 1) * FENC_STRIDE));
-            T02 = _mm_load_si128((__m128i*)(fenc + (i + 2) * FENC_STRIDE));
-            T03 = _mm_load_si128((__m128i*)(fenc + (i + 3) * FENC_STRIDE));
-
-            T10 = _mm_loadu_si128((__m128i*)(fref1 + (i + 0) * frefstride));
-            T11 = _mm_loadu_si128((__m128i*)(fref1 + (i + 1) * frefstride));
-            T12 = _mm_loadu_si128((__m128i*)(fref1 + (i + 2) * frefstride));
-            T13 = _mm_loadu_si128((__m128i*)(fref1 + (i + 3) * frefstride));
-
-            T20 = _mm_sad_epu8(T00, T10);
-            T21 = _mm_sad_epu8(T01, T11);
-            T22 = _mm_sad_epu8(T02, T12);
-            T23 = _mm_sad_epu8(T03, T13);
-
-            T20 = _mm_add_epi16(T20, T21);
-            T22 = _mm_add_epi16(T22, T23);
-            sum0 = _mm_add_epi16(T20, T22);
-
-            sum1 = _mm_shuffle_epi32(sum0, 2);
-            sum0 = _mm_add_epi32(sum0, sum1);
-            res[0] = res[0] + _mm_cvtsi128_si32(sum0);
-
-            T10 = _mm_loadu_si128((__m128i*)(fref2 + (i + 0) * frefstride));
-            T11 = _mm_loadu_si128((__m128i*)(fref2 + (i + 1) * frefstride));
-            T12 = _mm_loadu_si128((__m128i*)(fref2 + (i + 2) * frefstride));
-            T13 = _mm_loadu_si128((__m128i*)(fref2 + (i + 3) * frefstride));
-
-            T20 = _mm_sad_epu8(T00, T10);
-            T21 = _mm_sad_epu8(T01, T11);
-            T22 = _mm_sad_epu8(T02, T12);
-            T23 = _mm_sad_epu8(T03, T13);
-
-            T20 = _mm_add_epi16(T20, T21);
-            T22 = _mm_add_epi16(T22, T23);
-            sum0 = _mm_add_epi16(T20, T22);
-
-            sum1 = _mm_shuffle_epi32(sum0, 2);
-            sum0 = _mm_add_epi32(sum0, sum1);
-            res[1] = res[1] + _mm_cvtsi128_si32(sum0);
-
-            T10 = _mm_loadu_si128((__m128i*)(fref3 + (i + 0) * frefstride));
-            T11 = _mm_loadu_si128((__m128i*)(fref3 + (i + 1) * frefstride));
-            T12 = _mm_loadu_si128((__m128i*)(fref3 + (i + 2) * frefstride));
-            T13 = _mm_loadu_si128((__m128i*)(fref3 + (i + 3) * frefstride));
-
-            T20 = _mm_sad_epu8(T00, T10);
-            T21 = _mm_sad_epu8(T01, T11);
-            T22 = _mm_sad_epu8(T02, T12);
-            T23 = _mm_sad_epu8(T03, T13);
-
-            T20 = _mm_add_epi16(T20, T21);
-            T22 = _mm_add_epi16(T22, T23);
-            sum0 = _mm_add_epi16(T20, T22);
-
-            sum1 = _mm_shuffle_epi32(sum0, 2);
-            sum0 = _mm_add_epi32(sum0, sum1);
-            res[2] = res[2] + _mm_cvtsi128_si32(sum0);
-        }
-    }
-}
-
-#endif /* if INSTRSET >= X265_CPU_LEVEL_SSE41 */
-
-template<int ly>
-void sad_x3_24(pixel *fenc, pixel *fref1, pixel *fref2, pixel *fref3, intptr_t frefstride, int *res)
-{
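-    /* 24-wide blocks: one full 16-byte vector plus a second vector masked to
-     * 8 pixels with cutoff(8); the 8x16-bit accumulators are spilled into
-     * 32-bit sums every 16 rows, before they can overflow */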
-    Vec16uc m1, n1, n2, n3;
-
-    Vec4i sum1(0), sum2(0), sum3(0);
-    Vec8us sad1(0), sad2(0), sad3(0);
-    int max_iterators = (ly >> 4) << 4;
-    int row;
-
-    for (row = 0; row < max_iterators; row += 16)
-    {
-        for (int i = 0; i < 16; i++)
-        {
-            m1.load_a(fenc);
-            n1.load(fref1);
-            n2.load(fref2);
-            n3.load(fref3);
-
-            sad1.addSumAbsDiff(m1, n1);
-            sad2.addSumAbsDiff(m1, n2);
-            sad3.addSumAbsDiff(m1, n3);
-
-            m1.load_a(fenc + 16);
-            m1.cutoff(8);
-            n1.load(fref1 + 16);
-            n1.cutoff(8);
-            n2.load(fref2 + 16);
-            n2.cutoff(8);
-            n3.load(fref3 + 16);
-            n3.cutoff(8);
-
-            sad1.addSumAbsDiff(m1, n1);
-            sad2.addSumAbsDiff(m1, n2);
-            sad3.addSumAbsDiff(m1, n3);
-
-            fenc += FENC_STRIDE;
-            fref1 += frefstride;
-            fref2 += frefstride;
-            fref3 += frefstride;
-        }
-
-        sum1 += extend_low(sad1) + extend_high(sad1);
-        sum2 += extend_low(sad2) + extend_high(sad2);
-        sum3 += extend_low(sad3) + extend_high(sad3);
-        sad1 = 0;
-        sad2 = 0;
-        sad3 = 0;
-    }
-
-    while (row++ < ly)
-    {
-        m1.load_a(fenc);
-        n1.load(fref1);
-        n2.load(fref2);
-        n3.load(fref3);
-
-        sad1.addSumAbsDiff(m1, n1);
-        sad2.addSumAbsDiff(m1, n2);
-        sad3.addSumAbsDiff(m1, n3);
-
-        m1.load_a(fenc + 16);
-        m1.cutoff(8);
-        n1.load(fref1 + 16);
-        n1.cutoff(8);
-        n2.load(fref2 + 16);
-        n2.cutoff(8);
-        n3.load(fref3 + 16);
-        n3.cutoff(8);
-
-        sad1.addSumAbsDiff(m1, n1);
-        sad2.addSumAbsDiff(m1, n2);
-        sad3.addSumAbsDiff(m1, n3);
-
-        fenc += FENC_STRIDE;
-        fref1 += frefstride;
-        fref2 += frefstride;
-        fref3 += frefstride;
-    }
-
-    sum1 += extend_low(sad1) + extend_high(sad1);
-    sum2 += extend_low(sad2) + extend_high(sad2);
-    sum3 += extend_low(sad3) + extend_high(sad3);
-
-    res[0] = horizontal_add(sum1);
-    res[1] = horizontal_add(sum2);
-    res[2] = horizontal_add(sum3);
-}
-
-template<int ly>
-void sad_x3_32(pixel *fenc, pixel *fref1, pixel *fref2, pixel *fref3, intptr_t frefstride, int *res)
-{
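-    /* 32-wide blocks: two full 16-byte vectors per row; the 16-bit
-     * accumulators are spilled into 32-bit sums every 8 rows */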
-    Vec16uc m1, n1, n2, n3;
-
-    Vec4i sum1(0), sum2(0), sum3(0);
-    Vec8us sad1(0), sad2(0), sad3(0);
-    int max_iterators = (ly >> 3) << 3;
-    int row;
-
-    for (row = 0; row < max_iterators; row += 8)
-    {
-        for (int i = 0; i < 8; i++)
-        {
-            m1.load_a(fenc);
-            n1.load(fref1);
-            n2.load(fref2);
-            n3.load(fref3);
-
-            sad1.addSumAbsDiff(m1, n1);
-            sad2.addSumAbsDiff(m1, n2);
-            sad3.addSumAbsDiff(m1, n3);
-
-            m1.load_a(fenc + 16);
-            n1.load(fref1 + 16);
-            n2.load(fref2 + 16);
-            n3.load(fref3 + 16);
-
-            sad1.addSumAbsDiff(m1, n1);
-            sad2.addSumAbsDiff(m1, n2);
-            sad3.addSumAbsDiff(m1, n3);
-
-            fenc += FENC_STRIDE;
-            fref1 += frefstride;
-            fref2 += frefstride;
-            fref3 += frefstride;
-        }
-
-        sum1 += extend_low(sad1) + extend_high(sad1);
-        sum2 += extend_low(sad2) + extend_high(sad2);
-        sum3 += extend_low(sad3) + extend_high(sad3);
-        sad1 = 0;
-        sad2 = 0;
-        sad3 = 0;
-    }
-
-    while (row++ < ly)
-    {
-        m1.load_a(fenc);
-        n1.load(fref1);
-        n2.load(fref2);
-        n3.load(fref3);
-
-        sad1.addSumAbsDiff(m1, n1);
-        sad2.addSumAbsDiff(m1, n2);
-        sad3.addSumAbsDiff(m1, n3);
-
-        m1.load_a(fenc + 16);
-        n1.load(fref1 + 16);
-        n2.load(fref2 + 16);
-        n3.load(fref3 + 16);
-
-        sad1.addSumAbsDiff(m1, n1);
-        sad2.addSumAbsDiff(m1, n2);
-        sad3.addSumAbsDiff(m1, n3);
-
-        fenc += FENC_STRIDE;
-        fref1 += frefstride;
-        fref2 += frefstride;
-        fref3 += frefstride;
-    }
-
-    sum1 += extend_low(sad1) + extend_high(sad1);
-    sum2 += extend_low(sad2) + extend_high(sad2);
-    sum3 += extend_low(sad3) + extend_high(sad3);
-
-    res[0] = horizontal_add(sum1);
-    res[1] = horizontal_add(sum2);
-    res[2] = horizontal_add(sum3);
-}
-
-template<int ly>
-void sad_x3_48(pixel *fenc, pixel *fref1, pixel *fref2, pixel *fref3, intptr_t frefstride, int *res)
-{
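-    /* 48-wide blocks: three 16-byte vectors per row, spilled every 8 rows */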
-    Vec16uc m1, n1, n2, n3;
-
-    Vec4i sum1(0), sum2(0), sum3(0);
-    Vec8us sad1(0), sad2(0), sad3(0);
-    int max_iterators = (ly >> 3) << 3;
-    int row;
-
-    for (row = 0; row < max_iterators; row += 8)
-    {
-        for (int i = 0; i < 8; i++)
-        {
-            m1.load_a(fenc);
-            n1.load(fref1);
-            n2.load(fref2);
-            n3.load(fref3);
-
-            sad1.addSumAbsDiff(m1, n1);
-            sad2.addSumAbsDiff(m1, n2);
-            sad3.addSumAbsDiff(m1, n3);
-
-            m1.load_a(fenc + 16);
-            n1.load(fref1 + 16);
-            n2.load(fref2 + 16);
-            n3.load(fref3 + 16);
-
-            sad1.addSumAbsDiff(m1, n1);
-            sad2.addSumAbsDiff(m1, n2);
-            sad3.addSumAbsDiff(m1, n3);
-
-            m1.load_a(fenc + 32);
-            n1.load(fref1 + 32);
-            n2.load(fref2 + 32);
-            n3.load(fref3 + 32);
-
-            sad1.addSumAbsDiff(m1, n1);
-            sad2.addSumAbsDiff(m1, n2);
-            sad3.addSumAbsDiff(m1, n3);
-
-            fenc += FENC_STRIDE;
-            fref1 += frefstride;
-            fref2 += frefstride;
-            fref3 += frefstride;
-        }
-
-        sum1 += extend_low(sad1) + extend_high(sad1);
-        sum2 += extend_low(sad2) + extend_high(sad2);
-        sum3 += extend_low(sad3) + extend_high(sad3);
-        sad1 = 0;
-        sad2 = 0;
-        sad3 = 0;
-    }
-
-    while (row++ < ly)
-    {
-        m1.load_a(fenc);
-        n1.load(fref1);
-        n2.load(fref2);
-        n3.load(fref3);
-
-        sad1.addSumAbsDiff(m1, n1);
-        sad2.addSumAbsDiff(m1, n2);
-        sad3.addSumAbsDiff(m1, n3);
-
-        m1.load_a(fenc + 16);
-        n1.load(fref1 + 16);
-        n2.load(fref2 + 16);
-        n3.load(fref3 + 16);
-
-        sad1.addSumAbsDiff(m1, n1);
-        sad2.addSumAbsDiff(m1, n2);
-        sad3.addSumAbsDiff(m1, n3);
-
-        m1.load_a(fenc + 32);
-        n1.load(fref1 + 32);
-        n2.load(fref2 + 32);
-        n3.load(fref3 + 32);
-
-        sad1.addSumAbsDiff(m1, n1);
-        sad2.addSumAbsDiff(m1, n2);
-        sad3.addSumAbsDiff(m1, n3);
-
-        fenc += FENC_STRIDE;
-        fref1 += frefstride;
-        fref2 += frefstride;
-        fref3 += frefstride;
-    }
-
-    sum1 += extend_low(sad1) + extend_high(sad1);
-    sum2 += extend_low(sad2) + extend_high(sad2);
-    sum3 += extend_low(sad3) + extend_high(sad3);
-
-    res[0] = horizontal_add(sum1);
-    res[1] = horizontal_add(sum2);
-    res[2] = horizontal_add(sum3);
-}
-
-template<int ly>
-void sad_x3_64(pixel *fenc, pixel *fref1, pixel *fref2, pixel *fref3, intptr_t frefstride, int *res)
-{
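-    /* 64-wide blocks: four 16-byte vectors per row, spilled every 4 rows */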
-    Vec16uc m1, n1, n2, n3;
-
-    Vec4i sum1(0), sum2(0), sum3(0);
-    Vec8us sad1(0), sad2(0), sad3(0);
-    int row;
-
-    for (row = 0; row < ly; row += 4)
-    {
-        for (int i = 0; i < 4; i++)
-        {
-            m1.load_a(fenc);
-            n1.load(fref1);
-            n2.load(fref2);
-            n3.load(fref3);
-
-            sad1.addSumAbsDiff(m1, n1);
-            sad2.addSumAbsDiff(m1, n2);
-            sad3.addSumAbsDiff(m1, n3);
-
-            m1.load_a(fenc + 16);
-            n1.load(fref1 + 16);
-            n2.load(fref2 + 16);
-            n3.load(fref3 + 16);
-
-            sad1.addSumAbsDiff(m1, n1);
-            sad2.addSumAbsDiff(m1, n2);
-            sad3.addSumAbsDiff(m1, n3);
-
-            m1.load_a(fenc + 32);
-            n1.load(fref1 + 32);
-            n2.load(fref2 + 32);
-            n3.load(fref3 + 32);
-
-            sad1.addSumAbsDiff(m1, n1);
-            sad2.addSumAbsDiff(m1, n2);
-            sad3.addSumAbsDiff(m1, n3);
-
-            m1.load_a(fenc + 48);
-            n1.load(fref1 + 48);
-            n2.load(fref2 + 48);
-            n3.load(fref3 + 48);
-
-            sad1.addSumAbsDiff(m1, n1);
-            sad2.addSumAbsDiff(m1, n2);
-            sad3.addSumAbsDiff(m1, n3);
-
-            fenc += FENC_STRIDE;
-            fref1 += frefstride;
-            fref2 += frefstride;
-            fref3 += frefstride;
-        }
-
-        sum1 += extend_low(sad1) + extend_high(sad1);
-        sum2 += extend_low(sad2) + extend_high(sad2);
-        sum3 += extend_low(sad3) + extend_high(sad3);
-        sad1 = 0;
-        sad2 = 0;
-        sad3 = 0;
-    }
-
-    res[0] = horizontal_add(sum1);
-    res[1] = horizontal_add(sum2);
-    res[2] = horizontal_add(sum3);
-}
-
-#if INSTRSET >= X265_CPU_LEVEL_SSE41
-#if HAVE_MMX
-template<int ly>
-void sad_x4_4(pixel *fenc, pixel *fref1, pixel *fref2, pixel *fref3, pixel *fref4, intptr_t frefstride, int *res)
-{
-    assert((ly % 4) == 0);
-
-    __m64 sum0 = _mm_setzero_si64();
-    __m64 sum1 = _mm_setzero_si64();
-    __m64 sum2 = _mm_setzero_si64();
-    __m64 sum3 = _mm_setzero_si64();
-
-    __m64 T00, T01, T02, T03, T04, T05, T06, T07;
-    __m64 T0, T1, T2, T3, T4, T5, T6, T7;
-    __m64 T10, T11, T12, T13, T14, T15, T16, T17;
-    __m64 T20, T21, T22, T23, T24, T25, T26, T27;
-
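-    /* MMX path: each 4-pixel row fits in an __m64 loaded via
-     * _mm_cvtsi32_si64, and _mm_sad_pu8 yields the row SAD directly for each
-     * of the four references */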
-    if (ly == 4)
-    {
-        T00 = _mm_cvtsi32_si64(*(int*)(fenc + 0 * FENC_STRIDE));
-        T01 = _mm_cvtsi32_si64(*(int*)(fenc + 1 * FENC_STRIDE));
-        T02 = _mm_cvtsi32_si64(*(int*)(fenc + 2 * FENC_STRIDE));
-        T03 = _mm_cvtsi32_si64(*(int*)(fenc + 3 * FENC_STRIDE));
-
-        T10 = _mm_cvtsi32_si64(*(int*)(fref1 + 0 * frefstride));
-        T11 = _mm_cvtsi32_si64(*(int*)(fref1 + 1 * frefstride));
-        T12 = _mm_cvtsi32_si64(*(int*)(fref1 + 2 * frefstride));
-        T13 = _mm_cvtsi32_si64(*(int*)(fref1 + 3 * frefstride));
-
-        T20 = _mm_sad_pu8(T00, T10);
-        T21 = _mm_sad_pu8(T01, T11);
-        T22 = _mm_sad_pu8(T02, T12);
-        T23 = _mm_sad_pu8(T03, T13);
-
-        sum0 = _mm_add_pi16(sum0, T20);
-        sum0 = _mm_add_pi16(sum0, T21);
-        sum0 = _mm_add_pi16(sum0, T22);
-        sum0 = _mm_add_pi16(sum0, T23);
-
-        T10 = _mm_cvtsi32_si64(*(int*)(fref2 + 0 * frefstride));
-        T11 = _mm_cvtsi32_si64(*(int*)(fref2 + 1 * frefstride));
-        T12 = _mm_cvtsi32_si64(*(int*)(fref2 + 2 * frefstride));
-        T13 = _mm_cvtsi32_si64(*(int*)(fref2 + 3 * frefstride));
-
-        T20 = _mm_sad_pu8(T00, T10);
-        T21 = _mm_sad_pu8(T01, T11);
-        T22 = _mm_sad_pu8(T02, T12);
-        T23 = _mm_sad_pu8(T03, T13);
-
-        sum1 = _mm_add_pi16(sum1, T20);
-        sum1 = _mm_add_pi16(sum1, T21);
-        sum1 = _mm_add_pi16(sum1, T22);
-        sum1 = _mm_add_pi16(sum1, T23);
-
-        T10 = _mm_cvtsi32_si64(*(int*)(fref3 + 0 * frefstride));
-        T11 = _mm_cvtsi32_si64(*(int*)(fref3 + 1 * frefstride));
-        T12 = _mm_cvtsi32_si64(*(int*)(fref3 + 2 * frefstride));
-        T13 = _mm_cvtsi32_si64(*(int*)(fref3 + 3 * frefstride));
-
-        T20 = _mm_sad_pu8(T00, T10);
-        T21 = _mm_sad_pu8(T01, T11);
-        T22 = _mm_sad_pu8(T02, T12);
-        T23 = _mm_sad_pu8(T03, T13);
-
-        sum2 = _mm_add_pi16(sum2, T20);
-        sum2 = _mm_add_pi16(sum2, T21);
-        sum2 = _mm_add_pi16(sum2, T22);
-        sum2 = _mm_add_pi16(sum2, T23);
-
-        T10 = _mm_cvtsi32_si64(*(int*)(fref4 + 0 * frefstride));
-        T11 = _mm_cvtsi32_si64(*(int*)(fref4 + 1 * frefstride));
-        T12 = _mm_cvtsi32_si64(*(int*)(fref4 + 2 * frefstride));
-        T13 = _mm_cvtsi32_si64(*(int*)(fref4 + 3 * frefstride));
-
-        T20 = _mm_sad_pu8(T00, T10);
-        T21 = _mm_sad_pu8(T01, T11);
-        T22 = _mm_sad_pu8(T02, T12);
-        T23 = _mm_sad_pu8(T03, T13);
-
-        sum3 = _mm_add_pi16(sum3, T20);
-        sum3 = _mm_add_pi16(sum3, T21);
-        sum3 = _mm_add_pi16(sum3, T22);
-        sum3 = _mm_add_pi16(sum3, T23);
-    }
-    else if (ly == 8)
-    {
-        T00 = _mm_cvtsi32_si64(*(int*)(fenc + 0 * FENC_STRIDE));
-        T01 = _mm_cvtsi32_si64(*(int*)(fenc + 1 * FENC_STRIDE));
-        T02 = _mm_cvtsi32_si64(*(int*)(fenc + 2 * FENC_STRIDE));
-        T03 = _mm_cvtsi32_si64(*(int*)(fenc + 3 * FENC_STRIDE));
-        T04 = _mm_cvtsi32_si64(*(int*)(fenc + 4 * FENC_STRIDE));
-        T05 = _mm_cvtsi32_si64(*(int*)(fenc + 5 * FENC_STRIDE));
-        T06 = _mm_cvtsi32_si64(*(int*)(fenc + 6 * FENC_STRIDE));
-        T07 = _mm_cvtsi32_si64(*(int*)(fenc + 7 * FENC_STRIDE));
-
-        T10 = _mm_cvtsi32_si64(*(int*)(fref1 + 0 * frefstride));
-        T11 = _mm_cvtsi32_si64(*(int*)(fref1 + 1 * frefstride));
-        T12 = _mm_cvtsi32_si64(*(int*)(fref1 + 2 * frefstride));
-        T13 = _mm_cvtsi32_si64(*(int*)(fref1 + 3 * frefstride));
-        T14 = _mm_cvtsi32_si64(*(int*)(fref1 + 4 * frefstride));
-        T15 = _mm_cvtsi32_si64(*(int*)(fref1 + 5 * frefstride));
-        T16 = _mm_cvtsi32_si64(*(int*)(fref1 + 6 * frefstride));
-        T17 = _mm_cvtsi32_si64(*(int*)(fref1 + 7 * frefstride));
-
-        T20 = _mm_sad_pu8(T00, T10);
-        T21 = _mm_sad_pu8(T01, T11);
-        T22 = _mm_sad_pu8(T02, T12);
-        T23 = _mm_sad_pu8(T03, T13);
-        T24 = _mm_sad_pu8(T04, T14);
-        T25 = _mm_sad_pu8(T05, T15);
-        T26 = _mm_sad_pu8(T06, T16);
-        T27 = _mm_sad_pu8(T07, T17);
-
-        sum0 = _mm_add_pi16(sum0, T20);
-        sum0 = _mm_add_pi16(sum0, T21);
-        sum0 = _mm_add_pi16(sum0, T22);
-        sum0 = _mm_add_pi16(sum0, T23);
-        sum0 = _mm_add_pi16(sum0, T24);
-        sum0 = _mm_add_pi16(sum0, T25);
-        sum0 = _mm_add_pi16(sum0, T26);
-        sum0 = _mm_add_pi16(sum0, T27);
-
-        T10 = _mm_cvtsi32_si64(*(int*)(fref2 + 0 * frefstride));
-        T11 = _mm_cvtsi32_si64(*(int*)(fref2 + 1 * frefstride));
-        T12 = _mm_cvtsi32_si64(*(int*)(fref2 + 2 * frefstride));
-        T13 = _mm_cvtsi32_si64(*(int*)(fref2 + 3 * frefstride));
-        T14 = _mm_cvtsi32_si64(*(int*)(fref2 + 4 * frefstride));
-        T15 = _mm_cvtsi32_si64(*(int*)(fref2 + 5 * frefstride));
-        T16 = _mm_cvtsi32_si64(*(int*)(fref2 + 6 * frefstride));
-        T17 = _mm_cvtsi32_si64(*(int*)(fref2 + 7 * frefstride));
-
-        T20 = _mm_sad_pu8(T00, T10);
-        T21 = _mm_sad_pu8(T01, T11);
-        T22 = _mm_sad_pu8(T02, T12);
-        T23 = _mm_sad_pu8(T03, T13);
-        T24 = _mm_sad_pu8(T04, T14);
-        T25 = _mm_sad_pu8(T05, T15);
-        T26 = _mm_sad_pu8(T06, T16);
-        T27 = _mm_sad_pu8(T07, T17);
-
-        sum1 = _mm_add_pi16(sum1, T20);
-        sum1 = _mm_add_pi16(sum1, T21);
-        sum1 = _mm_add_pi16(sum1, T22);
-        sum1 = _mm_add_pi16(sum1, T23);
-        sum1 = _mm_add_pi16(sum1, T24);
-        sum1 = _mm_add_pi16(sum1, T25);
-        sum1 = _mm_add_pi16(sum1, T26);
-        sum1 = _mm_add_pi16(sum1, T27);
-
-        T10 = _mm_cvtsi32_si64(*(int*)(fref3 + 0 * frefstride));
-        T11 = _mm_cvtsi32_si64(*(int*)(fref3 + 1 * frefstride));
-        T12 = _mm_cvtsi32_si64(*(int*)(fref3 + 2 * frefstride));
-        T13 = _mm_cvtsi32_si64(*(int*)(fref3 + 3 * frefstride));
-        T14 = _mm_cvtsi32_si64(*(int*)(fref3 + 4 * frefstride));
-        T15 = _mm_cvtsi32_si64(*(int*)(fref3 + 5 * frefstride));
-        T16 = _mm_cvtsi32_si64(*(int*)(fref3 + 6 * frefstride));
-        T17 = _mm_cvtsi32_si64(*(int*)(fref3 + 7 * frefstride));
-
-        T20 = _mm_sad_pu8(T00, T10);
-        T21 = _mm_sad_pu8(T01, T11);
-        T22 = _mm_sad_pu8(T02, T12);
-        T23 = _mm_sad_pu8(T03, T13);
-        T24 = _mm_sad_pu8(T04, T14);
-        T25 = _mm_sad_pu8(T05, T15);
-        T26 = _mm_sad_pu8(T06, T16);
-        T27 = _mm_sad_pu8(T07, T17);
-
-        sum2 = _mm_add_pi16(sum2, T20);
-        sum2 = _mm_add_pi16(sum2, T21);
-        sum2 = _mm_add_pi16(sum2, T22);
-        sum2 = _mm_add_pi16(sum2, T23);
-        sum2 = _mm_add_pi16(sum2, T24);
-        sum2 = _mm_add_pi16(sum2, T25);
-        sum2 = _mm_add_pi16(sum2, T26);
-        sum2 = _mm_add_pi16(sum2, T27);
-
-        T10 = _mm_cvtsi32_si64(*(int*)(fref4 + 0 * frefstride));
-        T11 = _mm_cvtsi32_si64(*(int*)(fref4 + 1 * frefstride));
-        T12 = _mm_cvtsi32_si64(*(int*)(fref4 + 2 * frefstride));
-        T13 = _mm_cvtsi32_si64(*(int*)(fref4 + 3 * frefstride));
-        T14 = _mm_cvtsi32_si64(*(int*)(fref4 + 4 * frefstride));
-        T15 = _mm_cvtsi32_si64(*(int*)(fref4 + 5 * frefstride));
-        T16 = _mm_cvtsi32_si64(*(int*)(fref4 + 6 * frefstride));
-        T17 = _mm_cvtsi32_si64(*(int*)(fref4 + 7 * frefstride));
-
-        T20 = _mm_sad_pu8(T00, T10);
-        T21 = _mm_sad_pu8(T01, T11);
-        T22 = _mm_sad_pu8(T02, T12);
-        T23 = _mm_sad_pu8(T03, T13);
-        T24 = _mm_sad_pu8(T04, T14);
-        T25 = _mm_sad_pu8(T05, T15);
-        T26 = _mm_sad_pu8(T06, T16);
-        T27 = _mm_sad_pu8(T07, T17);
-
-        sum3 = _mm_add_pi16(sum3, T20);
-        sum3 = _mm_add_pi16(sum3, T21);
-        sum3 = _mm_add_pi16(sum3, T22);
-        sum3 = _mm_add_pi16(sum3, T23);
-        sum3 = _mm_add_pi16(sum3, T24);
-        sum3 = _mm_add_pi16(sum3, T25);
-        sum3 = _mm_add_pi16(sum3, T26);
-        sum3 = _mm_add_pi16(sum3, T27);
-    }
-    else if (ly == 16)
-    {
-        T00 = _mm_cvtsi32_si64(*(int*)(fenc + 0 * FENC_STRIDE));
-        T01 = _mm_cvtsi32_si64(*(int*)(fenc + 1 * FENC_STRIDE));
-        T02 = _mm_cvtsi32_si64(*(int*)(fenc + 2 * FENC_STRIDE));
-        T03 = _mm_cvtsi32_si64(*(int*)(fenc + 3 * FENC_STRIDE));
-        T04 = _mm_cvtsi32_si64(*(int*)(fenc + 4 * FENC_STRIDE));
-        T05 = _mm_cvtsi32_si64(*(int*)(fenc + 5 * FENC_STRIDE));
-        T06 = _mm_cvtsi32_si64(*(int*)(fenc + 6 * FENC_STRIDE));
-        T07 = _mm_cvtsi32_si64(*(int*)(fenc + 7 * FENC_STRIDE));
-        T0 = _mm_cvtsi32_si64(*(int*)(fenc + 8 * FENC_STRIDE));
-        T1 = _mm_cvtsi32_si64(*(int*)(fenc + 9 * FENC_STRIDE));
-        T2 = _mm_cvtsi32_si64(*(int*)(fenc + 10 * FENC_STRIDE));
-        T3 = _mm_cvtsi32_si64(*(int*)(fenc + 11 * FENC_STRIDE));
-        T4 = _mm_cvtsi32_si64(*(int*)(fenc + 12 * FENC_STRIDE));
-        T5 = _mm_cvtsi32_si64(*(int*)(fenc + 13 * FENC_STRIDE));
-        T6 = _mm_cvtsi32_si64(*(int*)(fenc + 14 * FENC_STRIDE));
-        T7 = _mm_cvtsi32_si64(*(int*)(fenc + 15 * FENC_STRIDE));
-
-        T10 = _mm_cvtsi32_si64(*(int*)(fref1 + 0 * frefstride));
-        T11 = _mm_cvtsi32_si64(*(int*)(fref1 + 1 * frefstride));
-        T12 = _mm_cvtsi32_si64(*(int*)(fref1 + 2 * frefstride));
-        T13 = _mm_cvtsi32_si64(*(int*)(fref1 + 3 * frefstride));
-        T14 = _mm_cvtsi32_si64(*(int*)(fref1 + 4 * frefstride));
-        T15 = _mm_cvtsi32_si64(*(int*)(fref1 + 5 * frefstride));
-        T16 = _mm_cvtsi32_si64(*(int*)(fref1 + 6 * frefstride));
-        T17 = _mm_cvtsi32_si64(*(int*)(fref1 + 7 * frefstride));
-
-        T20 = _mm_sad_pu8(T00, T10);
-        T21 = _mm_sad_pu8(T01, T11);
-        T22 = _mm_sad_pu8(T02, T12);
-        T23 = _mm_sad_pu8(T03, T13);
-        T24 = _mm_sad_pu8(T04, T14);
-        T25 = _mm_sad_pu8(T05, T15);
-        T26 = _mm_sad_pu8(T06, T16);
-        T27 = _mm_sad_pu8(T07, T17);
-
-        sum0 = _mm_add_pi16(sum0, T20);
-        sum0 = _mm_add_pi16(sum0, T21);
-        sum0 = _mm_add_pi16(sum0, T22);
-        sum0 = _mm_add_pi16(sum0, T23);
-        sum0 = _mm_add_pi16(sum0, T24);
-        sum0 = _mm_add_pi16(sum0, T25);
-        sum0 = _mm_add_pi16(sum0, T26);
-        sum0 = _mm_add_pi16(sum0, T27);
-
-        T10 = _mm_cvtsi32_si64(*(int*)(fref1 + 8 * frefstride));
-        T11 = _mm_cvtsi32_si64(*(int*)(fref1 + 9 * frefstride));
-        T12 = _mm_cvtsi32_si64(*(int*)(fref1 + 10 * frefstride));
-        T13 = _mm_cvtsi32_si64(*(int*)(fref1 + 11 * frefstride));
-        T14 = _mm_cvtsi32_si64(*(int*)(fref1 + 12 * frefstride));
-        T15 = _mm_cvtsi32_si64(*(int*)(fref1 + 13 * frefstride));
-        T16 = _mm_cvtsi32_si64(*(int*)(fref1 + 14 * frefstride));
-        T17 = _mm_cvtsi32_si64(*(int*)(fref1 + 15 * frefstride));
-
-        T20 = _mm_sad_pu8(T0, T10);
-        T21 = _mm_sad_pu8(T1, T11);
-        T22 = _mm_sad_pu8(T2, T12);
-        T23 = _mm_sad_pu8(T3, T13);
-        T24 = _mm_sad_pu8(T4, T14);
-        T25 = _mm_sad_pu8(T5, T15);
-        T26 = _mm_sad_pu8(T6, T16);
-        T27 = _mm_sad_pu8(T7, T17);
-
-        sum0 = _mm_add_pi16(sum0, T20);
-        sum0 = _mm_add_pi16(sum0, T21);
-        sum0 = _mm_add_pi16(sum0, T22);
-        sum0 = _mm_add_pi16(sum0, T23);
-        sum0 = _mm_add_pi16(sum0, T24);
-        sum0 = _mm_add_pi16(sum0, T25);
-        sum0 = _mm_add_pi16(sum0, T26);
-        sum0 = _mm_add_pi16(sum0, T27);
-
-        T10 = _mm_cvtsi32_si64(*(int*)(fref2 + 0 * frefstride));
-        T11 = _mm_cvtsi32_si64(*(int*)(fref2 + 1 * frefstride));
-        T12 = _mm_cvtsi32_si64(*(int*)(fref2 + 2 * frefstride));
-        T13 = _mm_cvtsi32_si64(*(int*)(fref2 + 3 * frefstride));
-        T14 = _mm_cvtsi32_si64(*(int*)(fref2 + 4 * frefstride));
-        T15 = _mm_cvtsi32_si64(*(int*)(fref2 + 5 * frefstride));
-        T16 = _mm_cvtsi32_si64(*(int*)(fref2 + 6 * frefstride));
-        T17 = _mm_cvtsi32_si64(*(int*)(fref2 + 7 * frefstride));
-
-        T20 = _mm_sad_pu8(T00, T10);
-        T21 = _mm_sad_pu8(T01, T11);
-        T22 = _mm_sad_pu8(T02, T12);
-        T23 = _mm_sad_pu8(T03, T13);
-        T24 = _mm_sad_pu8(T04, T14);
-        T25 = _mm_sad_pu8(T05, T15);
-        T26 = _mm_sad_pu8(T06, T16);
-        T27 = _mm_sad_pu8(T07, T17);
-
-        sum1 = _mm_add_pi16(sum1, T20);
-        sum1 = _mm_add_pi16(sum1, T21);
-        sum1 = _mm_add_pi16(sum1, T22);
-        sum1 = _mm_add_pi16(sum1, T23);
-        sum1 = _mm_add_pi16(sum1, T24);
-        sum1 = _mm_add_pi16(sum1, T25);
-        sum1 = _mm_add_pi16(sum1, T26);
-        sum1 = _mm_add_pi16(sum1, T27);
-
-        T10 = _mm_cvtsi32_si64(*(int*)(fref2 + 8 * frefstride));
-        T11 = _mm_cvtsi32_si64(*(int*)(fref2 + 9 * frefstride));
-        T12 = _mm_cvtsi32_si64(*(int*)(fref2 + 10 * frefstride));
-        T13 = _mm_cvtsi32_si64(*(int*)(fref2 + 11 * frefstride));
-        T14 = _mm_cvtsi32_si64(*(int*)(fref2 + 12 * frefstride));
-        T15 = _mm_cvtsi32_si64(*(int*)(fref2 + 13 * frefstride));
-        T16 = _mm_cvtsi32_si64(*(int*)(fref2 + 14 * frefstride));
-        T17 = _mm_cvtsi32_si64(*(int*)(fref2 + 15 * frefstride));
-
-        T20 = _mm_sad_pu8(T0, T10);
-        T21 = _mm_sad_pu8(T1, T11);
-        T22 = _mm_sad_pu8(T2, T12);
-        T23 = _mm_sad_pu8(T3, T13);
-        T24 = _mm_sad_pu8(T4, T14);
-        T25 = _mm_sad_pu8(T5, T15);
-        T26 = _mm_sad_pu8(T6, T16);
-        T27 = _mm_sad_pu8(T7, T17);
-
-        sum1 = _mm_add_pi16(sum1, T20);
-        sum1 = _mm_add_pi16(sum1, T21);
-        sum1 = _mm_add_pi16(sum1, T22);
-        sum1 = _mm_add_pi16(sum1, T23);
-        sum1 = _mm_add_pi16(sum1, T24);
-        sum1 = _mm_add_pi16(sum1, T25);
-        sum1 = _mm_add_pi16(sum1, T26);
-        sum1 = _mm_add_pi16(sum1, T27);
-
-        T10 = _mm_cvtsi32_si64(*(int*)(fref3 + 0 * frefstride));
-        T11 = _mm_cvtsi32_si64(*(int*)(fref3 + 1 * frefstride));
-        T12 = _mm_cvtsi32_si64(*(int*)(fref3 + 2 * frefstride));
-        T13 = _mm_cvtsi32_si64(*(int*)(fref3 + 3 * frefstride));
-        T14 = _mm_cvtsi32_si64(*(int*)(fref3 + 4 * frefstride));
-        T15 = _mm_cvtsi32_si64(*(int*)(fref3 + 5 * frefstride));
-        T16 = _mm_cvtsi32_si64(*(int*)(fref3 + 6 * frefstride));
-        T17 = _mm_cvtsi32_si64(*(int*)(fref3 + 7 * frefstride));
-
-        T20 = _mm_sad_pu8(T00, T10);
-        T21 = _mm_sad_pu8(T01, T11);
-        T22 = _mm_sad_pu8(T02, T12);
-        T23 = _mm_sad_pu8(T03, T13);
-        T24 = _mm_sad_pu8(T04, T14);
-        T25 = _mm_sad_pu8(T05, T15);
-        T26 = _mm_sad_pu8(T06, T16);
-        T27 = _mm_sad_pu8(T07, T17);
-
-        sum2 = _mm_add_pi16(sum2, T20);
-        sum2 = _mm_add_pi16(sum2, T21);
-        sum2 = _mm_add_pi16(sum2, T22);
-        sum2 = _mm_add_pi16(sum2, T23);
-        sum2 = _mm_add_pi16(sum2, T24);
-        sum2 = _mm_add_pi16(sum2, T25);
-        sum2 = _mm_add_pi16(sum2, T26);
-        sum2 = _mm_add_pi16(sum2, T27);
-
-        T10 = _mm_cvtsi32_si64(*(int*)(fref3 + 8 * frefstride));
-        T11 = _mm_cvtsi32_si64(*(int*)(fref3 + 9 * frefstride));
-        T12 = _mm_cvtsi32_si64(*(int*)(fref3 + 10 * frefstride));
-        T13 = _mm_cvtsi32_si64(*(int*)(fref3 + 11 * frefstride));
-        T14 = _mm_cvtsi32_si64(*(int*)(fref3 + 12 * frefstride));
-        T15 = _mm_cvtsi32_si64(*(int*)(fref3 + 13 * frefstride));
-        T16 = _mm_cvtsi32_si64(*(int*)(fref3 + 14 * frefstride));
-        T17 = _mm_cvtsi32_si64(*(int*)(fref3 + 15 * frefstride));
-
-        T20 = _mm_sad_pu8(T0, T10);
-        T21 = _mm_sad_pu8(T1, T11);
-        T22 = _mm_sad_pu8(T2, T12);
-        T23 = _mm_sad_pu8(T3, T13);
-        T24 = _mm_sad_pu8(T4, T14);
-        T25 = _mm_sad_pu8(T5, T15);
-        T26 = _mm_sad_pu8(T6, T16);
-        T27 = _mm_sad_pu8(T7, T17);
-
-        sum2 = _mm_add_pi16(sum2, T20);
-        sum2 = _mm_add_pi16(sum2, T21);
-        sum2 = _mm_add_pi16(sum2, T22);
-        sum2 = _mm_add_pi16(sum2, T23);
-        sum2 = _mm_add_pi16(sum2, T24);
-        sum2 = _mm_add_pi16(sum2, T25);
-        sum2 = _mm_add_pi16(sum2, T26);
-        sum2 = _mm_add_pi16(sum2, T27);
-
-        T10 = _mm_cvtsi32_si64(*(int*)(fref4 + 0 * frefstride));
-        T11 = _mm_cvtsi32_si64(*(int*)(fref4 + 1 * frefstride));
-        T12 = _mm_cvtsi32_si64(*(int*)(fref4 + 2 * frefstride));
-        T13 = _mm_cvtsi32_si64(*(int*)(fref4 + 3 * frefstride));
-        T14 = _mm_cvtsi32_si64(*(int*)(fref4 + 4 * frefstride));
-        T15 = _mm_cvtsi32_si64(*(int*)(fref4 + 5 * frefstride));
-        T16 = _mm_cvtsi32_si64(*(int*)(fref4 + 6 * frefstride));
-        T17 = _mm_cvtsi32_si64(*(int*)(fref4 + 7 * frefstride));
-
-        T20 = _mm_sad_pu8(T00, T10);
-        T21 = _mm_sad_pu8(T01, T11);
-        T22 = _mm_sad_pu8(T02, T12);
-        T23 = _mm_sad_pu8(T03, T13);
-        T24 = _mm_sad_pu8(T04, T14);
-        T25 = _mm_sad_pu8(T05, T15);
-        T26 = _mm_sad_pu8(T06, T16);
-        T27 = _mm_sad_pu8(T07, T17);
-
-        sum3 = _mm_add_pi16(sum3, T20);
-        sum3 = _mm_add_pi16(sum3, T21);
-        sum3 = _mm_add_pi16(sum3, T22);
-        sum3 = _mm_add_pi16(sum3, T23);
-        sum3 = _mm_add_pi16(sum3, T24);
-        sum3 = _mm_add_pi16(sum3, T25);
-        sum3 = _mm_add_pi16(sum3, T26);
-        sum3 = _mm_add_pi16(sum3, T27);
-
-        T10 = _mm_cvtsi32_si64(*(int*)(fref4 + 8 * frefstride));
-        T11 = _mm_cvtsi32_si64(*(int*)(fref4 + 9 * frefstride));
-        T12 = _mm_cvtsi32_si64(*(int*)(fref4 + 10 * frefstride));
-        T13 = _mm_cvtsi32_si64(*(int*)(fref4 + 11 * frefstride));
-        T14 = _mm_cvtsi32_si64(*(int*)(fref4 + 12 * frefstride));
-        T15 = _mm_cvtsi32_si64(*(int*)(fref4 + 13 * frefstride));
-        T16 = _mm_cvtsi32_si64(*(int*)(fref4 + 14 * frefstride));
-        T17 = _mm_cvtsi32_si64(*(int*)(fref4 + 15 * frefstride));
-
-        T20 = _mm_sad_pu8(T0, T10);
-        T21 = _mm_sad_pu8(T1, T11);
-        T22 = _mm_sad_pu8(T2, T12);
-        T23 = _mm_sad_pu8(T3, T13);
-        T24 = _mm_sad_pu8(T4, T14);
-        T25 = _mm_sad_pu8(T5, T15);
-        T26 = _mm_sad_pu8(T6, T16);
-        T27 = _mm_sad_pu8(T7, T17);
-
-        sum3 = _mm_add_pi16(sum3, T20);
-        sum3 = _mm_add_pi16(sum3, T21);
-        sum3 = _mm_add_pi16(sum3, T22);
-        sum3 = _mm_add_pi16(sum3, T23);
-        sum3 = _mm_add_pi16(sum3, T24);
-        sum3 = _mm_add_pi16(sum3, T25);
-        sum3 = _mm_add_pi16(sum3, T26);
-        sum3 = _mm_add_pi16(sum3, T27);
-    }
-    else if ((ly % 8) == 0)
-    {
-        for (int i = 0; i < ly; i += 8)
-        {
-            T00 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 0) * FENC_STRIDE));
-            T01 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 1) * FENC_STRIDE));
-            T02 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 2) * FENC_STRIDE));
-            T03 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 3) * FENC_STRIDE));
-            T04 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 4) * FENC_STRIDE));
-            T05 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 5) * FENC_STRIDE));
-            T06 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 6) * FENC_STRIDE));
-            T07 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 7) * FENC_STRIDE));
-
-            T10 = _mm_cvtsi32_si64(*(int*)(fref1 + (i + 0) * frefstride));
-            T11 = _mm_cvtsi32_si64(*(int*)(fref1 + (i + 1) * frefstride));
-            T12 = _mm_cvtsi32_si64(*(int*)(fref1 + (i + 2) * frefstride));
-            T13 = _mm_cvtsi32_si64(*(int*)(fref1 + (i + 3) * frefstride));
-            T14 = _mm_cvtsi32_si64(*(int*)(fref1 + (i + 4) * frefstride));
-            T15 = _mm_cvtsi32_si64(*(int*)(fref1 + (i + 5) * frefstride));
-            T16 = _mm_cvtsi32_si64(*(int*)(fref1 + (i + 6) * frefstride));
-            T17 = _mm_cvtsi32_si64(*(int*)(fref1 + (i + 7) * frefstride));
-
-            T20 = _mm_sad_pu8(T00, T10);
-            T21 = _mm_sad_pu8(T01, T11);
-            T22 = _mm_sad_pu8(T02, T12);
-            T23 = _mm_sad_pu8(T03, T13);
-            T24 = _mm_sad_pu8(T04, T14);
-            T25 = _mm_sad_pu8(T05, T15);
-            T26 = _mm_sad_pu8(T06, T16);
-            T27 = _mm_sad_pu8(T07, T17);
-
-            sum0 = _mm_add_pi16(sum0, T20);
-            sum0 = _mm_add_pi16(sum0, T21);
-            sum0 = _mm_add_pi16(sum0, T22);
-            sum0 = _mm_add_pi16(sum0, T23);
-            sum0 = _mm_add_pi16(sum0, T24);
-            sum0 = _mm_add_pi16(sum0, T25);
-            sum0 = _mm_add_pi16(sum0, T26);
-            sum0 = _mm_add_pi16(sum0, T27);
-
-            T10 = _mm_cvtsi32_si64(*(int*)(fref2 + (i + 0) * frefstride));
-            T11 = _mm_cvtsi32_si64(*(int*)(fref2 + (i + 1) * frefstride));
-            T12 = _mm_cvtsi32_si64(*(int*)(fref2 + (i + 2) * frefstride));
-            T13 = _mm_cvtsi32_si64(*(int*)(fref2 + (i + 3) * frefstride));
-            T14 = _mm_cvtsi32_si64(*(int*)(fref2 + (i + 4) * frefstride));
-            T15 = _mm_cvtsi32_si64(*(int*)(fref2 + (i + 5) * frefstride));
-            T16 = _mm_cvtsi32_si64(*(int*)(fref2 + (i + 6) * frefstride));
-            T17 = _mm_cvtsi32_si64(*(int*)(fref2 + (i + 7) * frefstride));
-
-            T20 = _mm_sad_pu8(T00, T10);
-            T21 = _mm_sad_pu8(T01, T11);
-            T22 = _mm_sad_pu8(T02, T12);
-            T23 = _mm_sad_pu8(T03, T13);
-            T24 = _mm_sad_pu8(T04, T14);
-            T25 = _mm_sad_pu8(T05, T15);
-            T26 = _mm_sad_pu8(T06, T16);
-            T27 = _mm_sad_pu8(T07, T17);
-
-            sum1 = _mm_add_pi16(sum1, T20);
-            sum1 = _mm_add_pi16(sum1, T21);
-            sum1 = _mm_add_pi16(sum1, T22);
-            sum1 = _mm_add_pi16(sum1, T23);
-            sum1 = _mm_add_pi16(sum1, T24);
-            sum1 = _mm_add_pi16(sum1, T25);
-            sum1 = _mm_add_pi16(sum1, T26);
-            sum1 = _mm_add_pi16(sum1, T27);
-
-            T10 = _mm_cvtsi32_si64(*(int*)(fref3 + (i + 0) * frefstride));
-            T11 = _mm_cvtsi32_si64(*(int*)(fref3 + (i + 1) * frefstride));
-            T12 = _mm_cvtsi32_si64(*(int*)(fref3 + (i + 2) * frefstride));
-            T13 = _mm_cvtsi32_si64(*(int*)(fref3 + (i + 3) * frefstride));
-            T14 = _mm_cvtsi32_si64(*(int*)(fref3 + (i + 4) * frefstride));
-            T15 = _mm_cvtsi32_si64(*(int*)(fref3 + (i + 5) * frefstride));
-            T16 = _mm_cvtsi32_si64(*(int*)(fref3 + (i + 6) * frefstride));
-            T17 = _mm_cvtsi32_si64(*(int*)(fref3 + (i + 7) * frefstride));
-
-            T20 = _mm_sad_pu8(T00, T10);
-            T21 = _mm_sad_pu8(T01, T11);
-            T22 = _mm_sad_pu8(T02, T12);
-            T23 = _mm_sad_pu8(T03, T13);
-            T24 = _mm_sad_pu8(T04, T14);
-            T25 = _mm_sad_pu8(T05, T15);
-            T26 = _mm_sad_pu8(T06, T16);
-            T27 = _mm_sad_pu8(T07, T17);
-
-            sum2 = _mm_add_pi16(sum2, T20);
-            sum2 = _mm_add_pi16(sum2, T21);
-            sum2 = _mm_add_pi16(sum2, T22);
-            sum2 = _mm_add_pi16(sum2, T23);
-            sum2 = _mm_add_pi16(sum2, T24);
-            sum2 = _mm_add_pi16(sum2, T25);
-            sum2 = _mm_add_pi16(sum2, T26);
-            sum2 = _mm_add_pi16(sum2, T27);
-
-            T10 = _mm_cvtsi32_si64(*(int*)(fref4 + (i + 0) * frefstride));
-            T11 = _mm_cvtsi32_si64(*(int*)(fref4 + (i + 1) * frefstride));
-            T12 = _mm_cvtsi32_si64(*(int*)(fref4 + (i + 2) * frefstride));
-            T13 = _mm_cvtsi32_si64(*(int*)(fref4 + (i + 3) * frefstride));
-            T14 = _mm_cvtsi32_si64(*(int*)(fref4 + (i + 4) * frefstride));
-            T15 = _mm_cvtsi32_si64(*(int*)(fref4 + (i + 5) * frefstride));
-            T16 = _mm_cvtsi32_si64(*(int*)(fref4 + (i + 6) * frefstride));
-            T17 = _mm_cvtsi32_si64(*(int*)(fref4 + (i + 7) * frefstride));
-
-            T20 = _mm_sad_pu8(T00, T10);
-            T21 = _mm_sad_pu8(T01, T11);
-            T22 = _mm_sad_pu8(T02, T12);
-            T23 = _mm_sad_pu8(T03, T13);
-            T24 = _mm_sad_pu8(T04, T14);
-            T25 = _mm_sad_pu8(T05, T15);
-            T26 = _mm_sad_pu8(T06, T16);
-            T27 = _mm_sad_pu8(T07, T17);
-
-            sum3 = _mm_add_pi16(sum3, T20);
-            sum3 = _mm_add_pi16(sum3, T21);
-            sum3 = _mm_add_pi16(sum3, T22);
-            sum3 = _mm_add_pi16(sum3, T23);
-            sum3 = _mm_add_pi16(sum3, T24);
-            sum3 = _mm_add_pi16(sum3, T25);
-            sum3 = _mm_add_pi16(sum3, T26);
-            sum3 = _mm_add_pi16(sum3, T27);
-        }
-    }
-    else
-    {
-        for (int i = 0; i < ly; i += 4)
-        {
-            T00 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 0) * FENC_STRIDE));
-            T01 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 1) * FENC_STRIDE));
-            T02 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 2) * FENC_STRIDE));
-            T03 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 3) * FENC_STRIDE));
-
-            T10 = _mm_cvtsi32_si64(*(int*)(fref1 + (i + 0) * frefstride));
-            T11 = _mm_cvtsi32_si64(*(int*)(fref1 + (i + 1) * frefstride));
-            T12 = _mm_cvtsi32_si64(*(int*)(fref1 + (i + 2) * frefstride));
-            T13 = _mm_cvtsi32_si64(*(int*)(fref1 + (i + 3) * frefstride));
-
-            T20 = _mm_sad_pu8(T00, T10);
-            T21 = _mm_sad_pu8(T01, T11);
-            T22 = _mm_sad_pu8(T02, T12);
-            T23 = _mm_sad_pu8(T03, T13);
-
-            sum0 = _mm_add_pi16(sum0, T20);
-            sum0 = _mm_add_pi16(sum0, T21);
-            sum0 = _mm_add_pi16(sum0, T22);
-            sum0 = _mm_add_pi16(sum0, T23);
-
-            T10 = _mm_cvtsi32_si64(*(int*)(fref2 + (i + 0) * frefstride));
-            T11 = _mm_cvtsi32_si64(*(int*)(fref2 + (i + 1) * frefstride));
-            T12 = _mm_cvtsi32_si64(*(int*)(fref2 + (i + 2) * frefstride));
-            T13 = _mm_cvtsi32_si64(*(int*)(fref2 + (i + 3) * frefstride));
-
-            T20 = _mm_sad_pu8(T00, T10);
-            T21 = _mm_sad_pu8(T01, T11);
-            T22 = _mm_sad_pu8(T02, T12);
-            T23 = _mm_sad_pu8(T03, T13);
-
-            sum1 = _mm_add_pi16(sum1, T20);
-            sum1 = _mm_add_pi16(sum1, T21);
-            sum1 = _mm_add_pi16(sum1, T22);
-            sum1 = _mm_add_pi16(sum1, T23);
-
-            T10 = _mm_cvtsi32_si64(*(int*)(fref3 + (i + 0) * frefstride));
-            T11 = _mm_cvtsi32_si64(*(int*)(fref3 + (i + 1) * frefstride));
-            T12 = _mm_cvtsi32_si64(*(int*)(fref3 + (i + 2) * frefstride));
-            T13 = _mm_cvtsi32_si64(*(int*)(fref3 + (i + 3) * frefstride));
-
-            T20 = _mm_sad_pu8(T00, T10);
-            T21 = _mm_sad_pu8(T01, T11);
-            T22 = _mm_sad_pu8(T02, T12);
-            T23 = _mm_sad_pu8(T03, T13);
-
-            sum2 = _mm_add_pi16(sum2, T20);
-            sum2 = _mm_add_pi16(sum2, T21);
-            sum2 = _mm_add_pi16(sum2, T22);
-            sum2 = _mm_add_pi16(sum2, T23);
-
-            T10 = _mm_cvtsi32_si64(*(int*)(fref4 + (i + 0) * frefstride));
-            T11 = _mm_cvtsi32_si64(*(int*)(fref4 + (i + 1) * frefstride));
-            T12 = _mm_cvtsi32_si64(*(int*)(fref4 + (i + 2) * frefstride));
-            T13 = _mm_cvtsi32_si64(*(int*)(fref4 + (i + 3) * frefstride));
-
-            T20 = _mm_sad_pu8(T00, T10);
-            T21 = _mm_sad_pu8(T01, T11);
-            T22 = _mm_sad_pu8(T02, T12);
-            T23 = _mm_sad_pu8(T03, T13);
-
-            sum3 = _mm_add_pi16(sum3, T20);
-            sum3 = _mm_add_pi16(sum3, T21);
-            sum3 = _mm_add_pi16(sum3, T22);
-            sum3 = _mm_add_pi16(sum3, T23);
-        }
-    }
-
-    res[0] = _m_to_int(sum0);
-    res[1] = _m_to_int(sum1);
-    res[2] = _m_to_int(sum2);
-    res[3] = _m_to_int(sum3);
-}
-
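For readers skimming the diff: every MMX path above follows the same shape, one _mm_sad_pu8 (PSADBW) per row and a running _mm_add_pi16 accumulation, with the total read back from the low word of the register. Below is a minimal standalone sketch of that pattern, for illustration only and not part of the patch; the name sad_8x4_mmx, the 8x4 block shape, and the plain stride parameter are invented for the example, and _mm_empty() is shown only as general MMX hygiene (whether the surrounding codebase issues EMMS elsewhere is not visible in this hunk).

    #include <mmintrin.h>   // __m64, _mm_setzero_si64, _mm_add_pi16, _mm_empty
    #include <xmmintrin.h>  // _mm_sad_pu8 (SSE extension to MMX)

    /* Illustrative only: SAD of one 8x4 block, same per-row
     * PSADBW + 16-bit accumulate pattern as the code above. */
    static int sad_8x4_mmx(const unsigned char* a, const unsigned char* b, int stride)
    {
        __m64 sum = _mm_setzero_si64();
        for (int y = 0; y < 4; y++)
        {
            __m64 ra = *(const __m64*)(a + y * stride);
            __m64 rb = *(const __m64*)(b + y * stride);
            sum = _mm_add_pi16(sum, _mm_sad_pu8(ra, rb)); /* 16-bit row SAD in the low word */
        }
        int res = _mm_cvtsi64_si32(sum); /* same readout as _m_to_int() */
        _mm_empty();                     /* clear MMX state before any x87 float code */
        return res;
    }

Accumulating in 16 bits is safe here because a single 8-pixel row SAD is at most 8 * 255 = 2040, so even 16 rows stay at 32640, well below 65535.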
-#else /* if HAVE_MMX */
-
-template<int ly>
-void sad_x4_4(pixel *fenc, pixel *fref1, pixel *fref2, pixel *fref3, pixel *fref4, intptr_t frefstride, int *res)
-{
-    assert((ly % 4) == 0);
-    __m128i sum0 = _mm_setzero_si128();
-    __m128i sum1 = _mm_setzero_si128();
-    __m128i sum2 = _mm_setzero_si128();
-    __m128i sum3 = _mm_setzero_si128();
-
-    __m128i T00, T01, T02, T03;
-    __m128i T10, T11, T12, T13;
-    __m128i R00, R01, R02, R03, R04;
-    __m128i T20;
-
-    if (ly == 4)
-    {
-        T00 = _mm_loadl_epi64((__m128i*)(fenc + (0) * FENC_STRIDE));
-        T01 = _mm_loadl_epi64((__m128i*)(fenc + (1) * FENC_STRIDE));
-        T01 = _mm_unpacklo_epi32(T00, T01);
-        T02 = _mm_loadl_epi64((__m128i*)(fenc + (2) * FENC_STRIDE));
-        T03 = _mm_loadl_epi64((__m128i*)(fenc + (3) * FENC_STRIDE));
-        T03 = _mm_unpacklo_epi32(T02, T03);
-        R00 = _mm_unpacklo_epi64(T01, T03);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref1 + (0) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref1 + (1) * frefstride));
-        T11 = _mm_unpacklo_epi32(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref1 + (2) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref1 + (3) * frefstride));
-        T13 = _mm_unpacklo_epi32(T12, T13);
-        R01 = _mm_unpacklo_epi64(T11, T13);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref2 + (0) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref2 + (1) * frefstride));
-        T11 = _mm_unpacklo_epi32(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref2 + (2) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref2 + (3) * frefstride));
-        T13 = _mm_unpacklo_epi32(T12, T13);
-        R02 = _mm_unpacklo_epi64(T11, T13);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref3 + (0) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref3 + (1) * frefstride));
-        T11 = _mm_unpacklo_epi32(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref3 + (2) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref3 + (3) * frefstride));
-        T13 = _mm_unpacklo_epi32(T12, T13);
-        R03 = _mm_unpacklo_epi64(T11, T13);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref4 + (0) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref4 + (1) * frefstride));
-        T11 = _mm_unpacklo_epi32(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref4 + (2) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref4 + (3) * frefstride));
-        T13 = _mm_unpacklo_epi32(T12, T13);
-        R04 = _mm_unpacklo_epi64(T11, T13);
-
-        T20 = _mm_sad_epu8(R00, R01);
-        sum0 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
-
-        T20 = _mm_sad_epu8(R00, R02);
-        sum1 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
-
-        T20 = _mm_sad_epu8(R00, R03);
-        sum2 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
-
-        T20 = _mm_sad_epu8(R00, R04);
-        sum3 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
-    }
-    else if (ly == 8)
-    {
-        T00 = _mm_loadl_epi64((__m128i*)(fenc + (0) * FENC_STRIDE));
-        T01 = _mm_loadl_epi64((__m128i*)(fenc + (1) * FENC_STRIDE));
-        T01 = _mm_unpacklo_epi32(T00, T01);
-        T02 = _mm_loadl_epi64((__m128i*)(fenc + (2) * FENC_STRIDE));
-        T03 = _mm_loadl_epi64((__m128i*)(fenc + (3) * FENC_STRIDE));
-        T03 = _mm_unpacklo_epi32(T02, T03);
-        R00 = _mm_unpacklo_epi64(T01, T03);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref1 + (0) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref1 + (1) * frefstride));
-        T11 = _mm_unpacklo_epi32(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref1 + (2) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref1 + (3) * frefstride));
-        T13 = _mm_unpacklo_epi32(T12, T13);
-        R01 = _mm_unpacklo_epi64(T11, T13);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref2 + (0) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref2 + (1) * frefstride));
-        T11 = _mm_unpacklo_epi32(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref2 + (2) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref2 + (3) * frefstride));
-        T13 = _mm_unpacklo_epi32(T12, T13);
-        R02 = _mm_unpacklo_epi64(T11, T13);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref3 + (0) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref3 + (1) * frefstride));
-        T11 = _mm_unpacklo_epi32(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref3 + (2) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref3 + (3) * frefstride));
-        T13 = _mm_unpacklo_epi32(T12, T13);
-        R03 = _mm_unpacklo_epi64(T11, T13);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref4 + (0) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref4 + (1) * frefstride));
-        T11 = _mm_unpacklo_epi32(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref4 + (2) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref4 + (3) * frefstride));
-        T13 = _mm_unpacklo_epi32(T12, T13);
-        R04 = _mm_unpacklo_epi64(T11, T13);
-
-        T20 = _mm_sad_epu8(R00, R01);
-        sum0 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
-
-        T20 = _mm_sad_epu8(R00, R02);
-        sum1 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
-
-        T20 = _mm_sad_epu8(R00, R03);
-        sum2 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
-
-        T20 = _mm_sad_epu8(R00, R04);
-        sum3 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
-
-        T00 = _mm_loadl_epi64((__m128i*)(fenc + (4) * FENC_STRIDE));
-        T01 = _mm_loadl_epi64((__m128i*)(fenc + (5) * FENC_STRIDE));
-        T01 = _mm_unpacklo_epi32(T00, T01);
-        T02 = _mm_loadl_epi64((__m128i*)(fenc + (6) * FENC_STRIDE));
-        T03 = _mm_loadl_epi64((__m128i*)(fenc + (7) * FENC_STRIDE));
-        T03 = _mm_unpacklo_epi32(T02, T03);
-        R00 = _mm_unpacklo_epi64(T01, T03);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref1 + (4) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref1 + (5) * frefstride));
-        T11 = _mm_unpacklo_epi32(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref1 + (6) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref1 + (7) * frefstride));
-        T13 = _mm_unpacklo_epi32(T12, T13);
-        R01 = _mm_unpacklo_epi64(T11, T13);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref2 + (4) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref2 + (5) * frefstride));
-        T11 = _mm_unpacklo_epi32(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref2 + (6) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref2 + (7) * frefstride));
-        T13 = _mm_unpacklo_epi32(T12, T13);
-        R02 = _mm_unpacklo_epi64(T11, T13);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref3 + (4) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref3 + (5) * frefstride));
-        T11 = _mm_unpacklo_epi32(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref3 + (6) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref3 + (7) * frefstride));
-        T13 = _mm_unpacklo_epi32(T12, T13);
-        R03 = _mm_unpacklo_epi64(T11, T13);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref4 + (4) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref4 + (5) * frefstride));
-        T11 = _mm_unpacklo_epi32(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref4 + (6) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref4 + (7) * frefstride));
-        T13 = _mm_unpacklo_epi32(T12, T13);
-        R04 = _mm_unpacklo_epi64(T11, T13);
-
-        T20 = _mm_sad_epu8(R00, R01);
-        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
-        sum0 = _mm_add_epi32(sum0, T20);
-
-        T20 = _mm_sad_epu8(R00, R02);
-        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
-        sum1 = _mm_add_epi32(sum1, T20);
-
-        T20 = _mm_sad_epu8(R00, R03);
-        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
-        sum2 = _mm_add_epi32(sum2, T20);
-
-        T20 = _mm_sad_epu8(R00, R04);
-        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
-        sum3 = _mm_add_epi32(sum3, T20);
-    }
-    else if (ly == 16)
-    {
-        T00 = _mm_loadl_epi64((__m128i*)(fenc + (0) * FENC_STRIDE));
-        T01 = _mm_loadl_epi64((__m128i*)(fenc + (1) * FENC_STRIDE));
-        T01 = _mm_unpacklo_epi32(T00, T01);
-        T02 = _mm_loadl_epi64((__m128i*)(fenc + (2) * FENC_STRIDE));
-        T03 = _mm_loadl_epi64((__m128i*)(fenc + (3) * FENC_STRIDE));
-        T03 = _mm_unpacklo_epi32(T02, T03);
-        R00 = _mm_unpacklo_epi64(T01, T03);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref1 + (0) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref1 + (1) * frefstride));
-        T11 = _mm_unpacklo_epi32(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref1 + (2) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref1 + (3) * frefstride));
-        T13 = _mm_unpacklo_epi32(T12, T13);
-        R01 = _mm_unpacklo_epi64(T11, T13);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref2 + (0) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref2 + (1) * frefstride));
-        T11 = _mm_unpacklo_epi32(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref2 + (2) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref2 + (3) * frefstride));
-        T13 = _mm_unpacklo_epi32(T12, T13);
-        R02 = _mm_unpacklo_epi64(T11, T13);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref3 + (0) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref3 + (1) * frefstride));
-        T11 = _mm_unpacklo_epi32(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref3 + (2) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref3 + (3) * frefstride));
-        T13 = _mm_unpacklo_epi32(T12, T13);
-        R03 = _mm_unpacklo_epi64(T11, T13);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref4 + (0) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref4 + (1) * frefstride));
-        T11 = _mm_unpacklo_epi32(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref4 + (2) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref4 + (3) * frefstride));
-        T13 = _mm_unpacklo_epi32(T12, T13);
-        R04 = _mm_unpacklo_epi64(T11, T13);
-
-        T20 = _mm_sad_epu8(R00, R01);
-        sum0 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
-
-        T20 = _mm_sad_epu8(R00, R02);
-        sum1 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
-
-        T20 = _mm_sad_epu8(R00, R03);
-        sum2 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
-
-        T20 = _mm_sad_epu8(R00, R04);
-        sum3 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
-
-        T00 = _mm_loadl_epi64((__m128i*)(fenc + (4) * FENC_STRIDE));
-        T01 = _mm_loadl_epi64((__m128i*)(fenc + (5) * FENC_STRIDE));
-        T01 = _mm_unpacklo_epi32(T00, T01);
-        T02 = _mm_loadl_epi64((__m128i*)(fenc + (6) * FENC_STRIDE));
-        T03 = _mm_loadl_epi64((__m128i*)(fenc + (7) * FENC_STRIDE));
-        T03 = _mm_unpacklo_epi32(T02, T03);
-        R00 = _mm_unpacklo_epi64(T01, T03);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref1 + (4) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref1 + (5) * frefstride));
-        T11 = _mm_unpacklo_epi32(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref1 + (6) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref1 + (7) * frefstride));
-        T13 = _mm_unpacklo_epi32(T12, T13);
-        R01 = _mm_unpacklo_epi64(T11, T13);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref2 + (4) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref2 + (5) * frefstride));
-        T11 = _mm_unpacklo_epi32(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref2 + (6) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref2 + (7) * frefstride));
-        T13 = _mm_unpacklo_epi32(T12, T13);
-        R02 = _mm_unpacklo_epi64(T11, T13);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref3 + (4) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref3 + (5) * frefstride));
-        T11 = _mm_unpacklo_epi32(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref3 + (6) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref3 + (7) * frefstride));
-        T13 = _mm_unpacklo_epi32(T12, T13);
-        R03 = _mm_unpacklo_epi64(T11, T13);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref4 + (4) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref4 + (5) * frefstride));
-        T11 = _mm_unpacklo_epi32(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref4 + (6) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref4 + (7) * frefstride));
-        T13 = _mm_unpacklo_epi32(T12, T13);
-        R04 = _mm_unpacklo_epi64(T11, T13);
-
-        T20 = _mm_sad_epu8(R00, R01);
-        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
-        sum0 = _mm_add_epi32(sum0, T20);
-
-        T20 = _mm_sad_epu8(R00, R02);
-        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
-        sum1 = _mm_add_epi32(sum1, T20);
-
-        T20 = _mm_sad_epu8(R00, R03);
-        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
-        sum2 = _mm_add_epi32(sum2, T20);
-
-        T20 = _mm_sad_epu8(R00, R04);
-        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
-        sum3 = _mm_add_epi32(sum3, T20);
-
-        T00 = _mm_loadl_epi64((__m128i*)(fenc + (8) * FENC_STRIDE));
-        T01 = _mm_loadl_epi64((__m128i*)(fenc + (9) * FENC_STRIDE));
-        T01 = _mm_unpacklo_epi32(T00, T01);
-        T02 = _mm_loadl_epi64((__m128i*)(fenc + (10) * FENC_STRIDE));
-        T03 = _mm_loadl_epi64((__m128i*)(fenc + (11) * FENC_STRIDE));
-        T03 = _mm_unpacklo_epi32(T02, T03);
-        R00 = _mm_unpacklo_epi64(T01, T03);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref1 + (8) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref1 + (9) * frefstride));
-        T11 = _mm_unpacklo_epi32(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref1 + (10) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref1 + (11) * frefstride));
-        T13 = _mm_unpacklo_epi32(T12, T13);
-        R01 = _mm_unpacklo_epi64(T11, T13);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref2 + (8) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref2 + (9) * frefstride));
-        T11 = _mm_unpacklo_epi32(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref2 + (10) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref2 + (11) * frefstride));
-        T13 = _mm_unpacklo_epi32(T12, T13);
-        R02 = _mm_unpacklo_epi64(T11, T13);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref3 + (8) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref3 + (9) * frefstride));
-        T11 = _mm_unpacklo_epi32(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref3 + (10) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref3 + (11) * frefstride));
-        T13 = _mm_unpacklo_epi32(T12, T13);
-        R03 = _mm_unpacklo_epi64(T11, T13);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref4 + (8) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref4 + (9) * frefstride));
-        T11 = _mm_unpacklo_epi32(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref4 + (10) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref4 + (11) * frefstride));
-        T13 = _mm_unpacklo_epi32(T12, T13);
-        R04 = _mm_unpacklo_epi64(T11, T13);
-
-        T20 = _mm_sad_epu8(R00, R01);
-        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
-        sum0 = _mm_add_epi32(sum0, T20);
-
-        T20 = _mm_sad_epu8(R00, R02);
-        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
-        sum1 = _mm_add_epi32(sum1, T20);
-
-        T20 = _mm_sad_epu8(R00, R03);
-        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
-        sum2 = _mm_add_epi32(sum2, T20);
-
-        T20 = _mm_sad_epu8(R00, R04);
-        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
-        sum3 = _mm_add_epi32(sum3, T20);
-
-        T00 = _mm_loadl_epi64((__m128i*)(fenc + (12) * FENC_STRIDE));
-        T01 = _mm_loadl_epi64((__m128i*)(fenc + (13) * FENC_STRIDE));
-        T01 = _mm_unpacklo_epi32(T00, T01);
-        T02 = _mm_loadl_epi64((__m128i*)(fenc + (14) * FENC_STRIDE));
-        T03 = _mm_loadl_epi64((__m128i*)(fenc + (15) * FENC_STRIDE));
-        T03 = _mm_unpacklo_epi32(T02, T03);
-        R00 = _mm_unpacklo_epi64(T01, T03);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref1 + (12) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref1 + (13) * frefstride));
-        T11 = _mm_unpacklo_epi32(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref1 + (14) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref1 + (15) * frefstride));
-        T13 = _mm_unpacklo_epi32(T12, T13);
-        R01 = _mm_unpacklo_epi64(T11, T13);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref2 + (12) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref2 + (13) * frefstride));
-        T11 = _mm_unpacklo_epi32(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref2 + (14) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref2 + (15) * frefstride));
-        T13 = _mm_unpacklo_epi32(T12, T13);
-        R02 = _mm_unpacklo_epi64(T11, T13);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref3 + (12) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref3 + (13) * frefstride));
-        T11 = _mm_unpacklo_epi32(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref3 + (14) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref3 + (15) * frefstride));
-        T13 = _mm_unpacklo_epi32(T12, T13);
-        R03 = _mm_unpacklo_epi64(T11, T13);
-
-        T10 = _mm_loadl_epi64((__m128i*)(fref4 + (12) * frefstride));
-        T11 = _mm_loadl_epi64((__m128i*)(fref4 + (13) * frefstride));
-        T11 = _mm_unpacklo_epi32(T10, T11);
-        T12 = _mm_loadl_epi64((__m128i*)(fref4 + (14) * frefstride));
-        T13 = _mm_loadl_epi64((__m128i*)(fref4 + (15) * frefstride));
-        T13 = _mm_unpacklo_epi32(T12, T13);
-        R04 = _mm_unpacklo_epi64(T11, T13);
-
-        T20 = _mm_sad_epu8(R00, R01);
-        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
-        sum0 = _mm_add_epi32(sum0, T20);
-
-        T20 = _mm_sad_epu8(R00, R02);
-        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
-        sum1 = _mm_add_epi32(sum1, T20);
-
-        T20 = _mm_sad_epu8(R00, R03);
-        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
-        sum2 = _mm_add_epi32(sum2, T20);
-
-        T20 = _mm_sad_epu8(R00, R04);
-        T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
-        sum3 = _mm_add_epi32(sum3, T20);
-    }
-    else if ((ly % 8) == 0)
-    {
-        for (int i = 0; i < ly; i += 8)
-        {
-            T00 = _mm_loadl_epi64((__m128i*)(fenc + (i + 0) * FENC_STRIDE));
-            T01 = _mm_loadl_epi64((__m128i*)(fenc + (i + 1) * FENC_STRIDE));
-            T01 = _mm_unpacklo_epi32(T00, T01);
-            T02 = _mm_loadl_epi64((__m128i*)(fenc + (i + 2) * FENC_STRIDE));
-            T03 = _mm_loadl_epi64((__m128i*)(fenc + (i + 3) * FENC_STRIDE));
-            T03 = _mm_unpacklo_epi32(T02, T03);
-            R00 = _mm_unpacklo_epi64(T01, T03);
-
-            T10 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 0) * frefstride));
-            T11 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 1) * frefstride));
-            T11 = _mm_unpacklo_epi32(T10, T11);
-            T12 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 2) * frefstride));
-            T13 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 3) * frefstride));
-            T13 = _mm_unpacklo_epi32(T12, T13);
-            R01 = _mm_unpacklo_epi64(T11, T13);
-
-            T10 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 0) * frefstride));
-            T11 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 1) * frefstride));
-            T11 = _mm_unpacklo_epi32(T10, T11);
-            T12 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 2) * frefstride));
-            T13 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 3) * frefstride));
-            T13 = _mm_unpacklo_epi32(T12, T13);
-            R02 = _mm_unpacklo_epi64(T11, T13);
-
-            T10 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 0) * frefstride));
-            T11 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 1) * frefstride));
-            T11 = _mm_unpacklo_epi32(T10, T11);
-            T12 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 2) * frefstride));
-            T13 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 3) * frefstride));
-            T13 = _mm_unpacklo_epi32(T12, T13);
-            R03 = _mm_unpacklo_epi64(T11, T13);
-
-            T10 = _mm_loadl_epi64((__m128i*)(fref4 + (i + 0) * frefstride));
-            T11 = _mm_loadl_epi64((__m128i*)(fref4 + (i + 1) * frefstride));
-            T11 = _mm_unpacklo_epi32(T10, T11);
-            T12 = _mm_loadl_epi64((__m128i*)(fref4 + (i + 2) * frefstride));
-            T13 = _mm_loadl_epi64((__m128i*)(fref4 + (i + 3) * frefstride));
-            T13 = _mm_unpacklo_epi32(T12, T13);
-            R04 = _mm_unpacklo_epi64(T11, T13);
-
-            T20 = _mm_sad_epu8(R00, R01);
-            T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
-            sum0 = _mm_add_epi32(sum0, T20);
-
-            T20 = _mm_sad_epu8(R00, R02);
-            T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
-            sum1 = _mm_add_epi32(sum1, T20);
-
-            T20 = _mm_sad_epu8(R00, R03);
-            T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
-            sum2 = _mm_add_epi32(sum2, T20);
-
-            T20 = _mm_sad_epu8(R00, R04);
-            T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
-            sum3 = _mm_add_epi32(sum3, T20);
-
-            T00 = _mm_loadl_epi64((__m128i*)(fenc + (i + 4) * FENC_STRIDE));
-            T01 = _mm_loadl_epi64((__m128i*)(fenc + (i + 5) * FENC_STRIDE));
-            T01 = _mm_unpacklo_epi32(T00, T01);
-            T02 = _mm_loadl_epi64((__m128i*)(fenc + (i + 6) * FENC_STRIDE));
-            T03 = _mm_loadl_epi64((__m128i*)(fenc + (i + 7) * FENC_STRIDE));
-            T03 = _mm_unpacklo_epi32(T02, T03);
-            R00 = _mm_unpacklo_epi64(T01, T03);
-
-            T10 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 4) * frefstride));
-            T11 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 5) * frefstride));
-            T11 = _mm_unpacklo_epi32(T10, T11);
-            T12 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 6) * frefstride));
-            T13 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 7) * frefstride));
-            T13 = _mm_unpacklo_epi32(T12, T13);
-            R01 = _mm_unpacklo_epi64(T11, T13);
-
-            T10 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 4) * frefstride));
-            T11 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 5) * frefstride));
-            T11 = _mm_unpacklo_epi32(T10, T11);
-            T12 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 6) * frefstride));
-            T13 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 7) * frefstride));
-            T13 = _mm_unpacklo_epi32(T12, T13);
-            R02 = _mm_unpacklo_epi64(T11, T13);
-
-            T10 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 4) * frefstride));
-            T11 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 5) * frefstride));
-            T11 = _mm_unpacklo_epi32(T10, T11);
-            T12 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 6) * frefstride));
-            T13 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 7) * frefstride));
-            T13 = _mm_unpacklo_epi32(T12, T13);
-            R03 = _mm_unpacklo_epi64(T11, T13);
-
-            T10 = _mm_loadl_epi64((__m128i*)(fref4 + (i + 4) * frefstride));
-            T11 = _mm_loadl_epi64((__m128i*)(fref4 + (i + 5) * frefstride));
-            T11 = _mm_unpacklo_epi32(T10, T11);
-            T12 = _mm_loadl_epi64((__m128i*)(fref4 + (i + 6) * frefstride));
-            T13 = _mm_loadl_epi64((__m128i*)(fref4 + (i + 7) * frefstride));
-            T13 = _mm_unpacklo_epi32(T12, T13);
-            R04 = _mm_unpacklo_epi64(T11, T13);
-
-            T20 = _mm_sad_epu8(R00, R01);
-            T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
-            sum0 = _mm_add_epi32(sum0, T20);
-
-            T20 = _mm_sad_epu8(R00, R02);
-            T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
-            sum1 = _mm_add_epi32(sum1, T20);
-
-            T20 = _mm_sad_epu8(R00, R03);
-            T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
-            sum2 = _mm_add_epi32(sum2, T20);
-
-            T20 = _mm_sad_epu8(R00, R04);
-            T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
-            sum3 = _mm_add_epi32(sum3, T20);
-        }
-    }
-    else
-    {
-        for (int i = 0; i < ly; i += 4)
-        {
-            T00 = _mm_loadl_epi64((__m128i*)(fenc + (i + 0) * FENC_STRIDE));
-            T01 = _mm_loadl_epi64((__m128i*)(fenc + (i + 1) * FENC_STRIDE));
-            T01 = _mm_unpacklo_epi32(T00, T01);
-            T02 = _mm_loadl_epi64((__m128i*)(fenc + (i + 2) * FENC_STRIDE));
-            T03 = _mm_loadl_epi64((__m128i*)(fenc + (i + 3) * FENC_STRIDE));
-            T03 = _mm_unpacklo_epi32(T02, T03);
-            R00 = _mm_unpacklo_epi64(T01, T03);
-
-            T10 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 0) * frefstride));
-            T11 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 1) * frefstride));
-            T11 = _mm_unpacklo_epi32(T10, T11);
-            T12 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 2) * frefstride));
-            T13 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 3) * frefstride));
-            T13 = _mm_unpacklo_epi32(T12, T13);
-            R01 = _mm_unpacklo_epi64(T11, T13);
-
-            T10 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 0) * frefstride));
-            T11 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 1) * frefstride));
-            T11 = _mm_unpacklo_epi32(T10, T11);
-            T12 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 2) * frefstride));
-            T13 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 3) * frefstride));
-            T13 = _mm_unpacklo_epi32(T12, T13);
-            R02 = _mm_unpacklo_epi64(T11, T13);
-
-            T10 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 0) * frefstride));
-            T11 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 1) * frefstride));
-            T11 = _mm_unpacklo_epi32(T10, T11);
-            T12 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 2) * frefstride));
-            T13 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 3) * frefstride));
-            T13 = _mm_unpacklo_epi32(T12, T13);
-            R03 = _mm_unpacklo_epi64(T11, T13);
-
-            T10 = _mm_loadl_epi64((__m128i*)(fref4 + (i + 0) * frefstride));
-            T11 = _mm_loadl_epi64((__m128i*)(fref4 + (i + 1) * frefstride));
-            T11 = _mm_unpacklo_epi32(T10, T11);
-            T12 = _mm_loadl_epi64((__m128i*)(fref4 + (i + 2) * frefstride));
-            T13 = _mm_loadl_epi64((__m128i*)(fref4 + (i + 3) * frefstride));
-            T13 = _mm_unpacklo_epi32(T12, T13);
-            R04 = _mm_unpacklo_epi64(T11, T13);
-
-            T20 = _mm_sad_epu8(R00, R01);
-            T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
-            sum0 = _mm_add_epi32(sum0, T20);
-
-            T20 = _mm_sad_epu8(R00, R02);
-            T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
-            sum1 = _mm_add_epi32(sum1, T20);
-
-            T20 = _mm_sad_epu8(R00, R03);
-            T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
-            sum2 = _mm_add_epi32(sum2, T20);
-
-            T20 = _mm_sad_epu8(R00, R04);
-            T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
-            sum3 = _mm_add_epi32(sum3, T20);
-        }
-    }
-    res[0] = _mm_cvtsi128_si32(sum0);
-    res[1] = _mm_cvtsi128_si32(sum1);
-    res[2] = _mm_cvtsi128_si32(sum2);
-    res[3] = _mm_cvtsi128_si32(sum3);
-}
-
-#endif /* if HAVE_MMX */
-
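The SSE2 paths above avoid MMX entirely: four 4-byte rows are interleaved into a single XMM register via _mm_unpacklo_epi32/_mm_unpacklo_epi64, one _mm_sad_epu8 produces two partial sums (one per 64-bit half), and _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2)) folds the upper sum onto lane 0 so a single _mm_cvtsi128_si32 can read the result. A self-contained sketch of that reduction follows, for illustration only; sad_4x4_sse2, the 4x4 geometry, and stride are invented for the example, and the packing uses _mm_set_epi32 for brevity where the patch uses the unpack sequence.

    #include <emmintrin.h> // SSE2: _mm_set_epi32, _mm_sad_epu8, _mm_shuffle_epi32
    #include <stdio.h>

    /* Illustrative only: SAD of one 4x4 block of bytes. */
    static int sad_4x4_sse2(const unsigned char* a, const unsigned char* b, int stride)
    {
        __m128i ra = _mm_set_epi32(*(const int*)(a + 3 * stride), *(const int*)(a + 2 * stride),
                                   *(const int*)(a + 1 * stride), *(const int*)(a + 0 * stride));
        __m128i rb = _mm_set_epi32(*(const int*)(b + 3 * stride), *(const int*)(b + 2 * stride),
                                   *(const int*)(b + 1 * stride), *(const int*)(b + 0 * stride));
        __m128i t = _mm_sad_epu8(ra, rb);              /* partial sums in 32-bit lanes 0 and 2 */
        t = _mm_add_epi32(t, _mm_shuffle_epi32(t, 2)); /* fold lane 2 onto lane 0 */
        return _mm_cvtsi128_si32(t);
    }

    int main(void)
    {
        unsigned char a[16], b[16];
        for (int i = 0; i < 16; i++) { a[i] = (unsigned char)i; b[i] = (unsigned char)(i + 1); }
        printf("%d\n", sad_4x4_sse2(a, b, 4)); /* prints 16: |a - b| == 1 per pixel */
        return 0;
    }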
-#if HAVE_MMX
-template<int ly>
-void sad_x4_8(pixel *fenc, pixel *fref1, pixel *fref2, pixel *fref3, pixel *fref4, intptr_t frefstride, int *res)
-{
-    assert((ly % 4) == 0);
-
-    __m64 sum0 = _mm_setzero_si64();
-    __m64 sum1 = _mm_setzero_si64();
-    __m64 sum2 = _mm_setzero_si64();
-    __m64 sum3 = _mm_setzero_si64();
-
-    __m64 T00, T01, T02, T03, T04, T05, T06, T07;
-    __m64 T0, T1, T2, T3, T4, T5, T6, T7;
-    __m64 T10, T11, T12, T13, T14, T15, T16, T17;
-    __m64 T20, T21, T22, T23, T24, T25, T26, T27;
-
-    if (ly == 4)
-    {
-        T00 = (*(__m64*)(fenc + 0 * FENC_STRIDE));
-        T01 = (*(__m64*)(fenc + 1 * FENC_STRIDE));
-        T02 = (*(__m64*)(fenc + 2 * FENC_STRIDE));
-        T03 = (*(__m64*)(fenc + 3 * FENC_STRIDE));
-
-        T10 = (*(__m64*)(fref1 + 0 * frefstride));
-        T11 = (*(__m64*)(fref1 + 1 * frefstride));
-        T12 = (*(__m64*)(fref1 + 2 * frefstride));
-        T13 = (*(__m64*)(fref1 + 3 * frefstride));
-
-        T20 = _mm_sad_pu8(T00, T10);
-        T21 = _mm_sad_pu8(T01, T11);
-        T22 = _mm_sad_pu8(T02, T12);
-        T23 = _mm_sad_pu8(T03, T13);
-
-        sum0 = _mm_add_pi16(sum0, T20);
-        sum0 = _mm_add_pi16(sum0, T21);
-        sum0 = _mm_add_pi16(sum0, T22);
-        sum0 = _mm_add_pi16(sum0, T23);
-
-        T10 = (*(__m64*)(fref2 + 0 * frefstride));
-        T11 = (*(__m64*)(fref2 + 1 * frefstride));
-        T12 = (*(__m64*)(fref2 + 2 * frefstride));
-        T13 = (*(__m64*)(fref2 + 3 * frefstride));
-
-        T20 = _mm_sad_pu8(T00, T10);
-        T21 = _mm_sad_pu8(T01, T11);
-        T22 = _mm_sad_pu8(T02, T12);
-        T23 = _mm_sad_pu8(T03, T13);
-
-        sum1 = _mm_add_pi16(sum1, T20);
-        sum1 = _mm_add_pi16(sum1, T21);
-        sum1 = _mm_add_pi16(sum1, T22);
-        sum1 = _mm_add_pi16(sum1, T23);
-
-        T10 = (*(__m64*)(fref3 + 0 * frefstride));
-        T11 = (*(__m64*)(fref3 + 1 * frefstride));
-        T12 = (*(__m64*)(fref3 + 2 * frefstride));
-        T13 = (*(__m64*)(fref3 + 3 * frefstride));
-
-        T20 = _mm_sad_pu8(T00, T10);
-        T21 = _mm_sad_pu8(T01, T11);
-        T22 = _mm_sad_pu8(T02, T12);
-        T23 = _mm_sad_pu8(T03, T13);
-
-        sum2 = _mm_add_pi16(sum2, T20);
-        sum2 = _mm_add_pi16(sum2, T21);
-        sum2 = _mm_add_pi16(sum2, T22);
-        sum2 = _mm_add_pi16(sum2, T23);
-
-        T10 = (*(__m64*)(fref4 + 0 * frefstride));
-        T11 = (*(__m64*)(fref4 + 1 * frefstride));
-        T12 = (*(__m64*)(fref4 + 2 * frefstride));
-        T13 = (*(__m64*)(fref4 + 3 * frefstride));
-
-        T20 = _mm_sad_pu8(T00, T10);
-        T21 = _mm_sad_pu8(T01, T11);
-        T22 = _mm_sad_pu8(T02, T12);
-        T23 = _mm_sad_pu8(T03, T13);
-
-        sum3 = _mm_add_pi16(sum3, T20);
-        sum3 = _mm_add_pi16(sum3, T21);
-        sum3 = _mm_add_pi16(sum3, T22);
-        sum3 = _mm_add_pi16(sum3, T23);
-    }
-    else if (ly == 8)
-    {
-        T00 = (*(__m64*)(fenc + 0 * FENC_STRIDE));
-        T01 = (*(__m64*)(fenc + 1 * FENC_STRIDE));
-        T02 = (*(__m64*)(fenc + 2 * FENC_STRIDE));
-        T03 = (*(__m64*)(fenc + 3 * FENC_STRIDE));
-        T04 = (*(__m64*)(fenc + 4 * FENC_STRIDE));
-        T05 = (*(__m64*)(fenc + 5 * FENC_STRIDE));
-        T06 = (*(__m64*)(fenc + 6 * FENC_STRIDE));
-        T07 = (*(__m64*)(fenc + 7 * FENC_STRIDE));
-
-        T10 = (*(__m64*)(fref1 + 0 * frefstride));
-        T11 = (*(__m64*)(fref1 + 1 * frefstride));
-        T12 = (*(__m64*)(fref1 + 2 * frefstride));
-        T13 = (*(__m64*)(fref1 + 3 * frefstride));
-        T14 = (*(__m64*)(fref1 + 4 * frefstride));
-        T15 = (*(__m64*)(fref1 + 5 * frefstride));
-        T16 = (*(__m64*)(fref1 + 6 * frefstride));
-        T17 = (*(__m64*)(fref1 + 7 * frefstride));
-
-        T20 = _mm_sad_pu8(T00, T10);
-        T21 = _mm_sad_pu8(T01, T11);
-        T22 = _mm_sad_pu8(T02, T12);
-        T23 = _mm_sad_pu8(T03, T13);
-        T24 = _mm_sad_pu8(T04, T14);
-        T25 = _mm_sad_pu8(T05, T15);
-        T26 = _mm_sad_pu8(T06, T16);
-        T27 = _mm_sad_pu8(T07, T17);
-
-        sum0 = _mm_add_pi16(sum0, T20);
-        sum0 = _mm_add_pi16(sum0, T21);
-        sum0 = _mm_add_pi16(sum0, T22);
-        sum0 = _mm_add_pi16(sum0, T23);
-        sum0 = _mm_add_pi16(sum0, T24);
-        sum0 = _mm_add_pi16(sum0, T25);
-        sum0 = _mm_add_pi16(sum0, T26);
-        sum0 = _mm_add_pi16(sum0, T27);
-
-        T10 = (*(__m64*)(fref2 + 0 * frefstride));
-        T11 = (*(__m64*)(fref2 + 1 * frefstride));
-        T12 = (*(__m64*)(fref2 + 2 * frefstride));
-        T13 = (*(__m64*)(fref2 + 3 * frefstride));
-        T14 = (*(__m64*)(fref2 + 4 * frefstride));
-        T15 = (*(__m64*)(fref2 + 5 * frefstride));
-        T16 = (*(__m64*)(fref2 + 6 * frefstride));
-        T17 = (*(__m64*)(fref2 + 7 * frefstride));
-
-        T20 = _mm_sad_pu8(T00, T10);
-        T21 = _mm_sad_pu8(T01, T11);
-        T22 = _mm_sad_pu8(T02, T12);
-        T23 = _mm_sad_pu8(T03, T13);
-        T24 = _mm_sad_pu8(T04, T14);
-        T25 = _mm_sad_pu8(T05, T15);
-        T26 = _mm_sad_pu8(T06, T16);
-        T27 = _mm_sad_pu8(T07, T17);
-
-        sum1 = _mm_add_pi16(sum1, T20);
-        sum1 = _mm_add_pi16(sum1, T21);
-        sum1 = _mm_add_pi16(sum1, T22);
-        sum1 = _mm_add_pi16(sum1, T23);
-        sum1 = _mm_add_pi16(sum1, T24);
-        sum1 = _mm_add_pi16(sum1, T25);
-        sum1 = _mm_add_pi16(sum1, T26);
-        sum1 = _mm_add_pi16(sum1, T27);
-
-        T10 = (*(__m64*)(fref3 + 0 * frefstride));
-        T11 = (*(__m64*)(fref3 + 1 * frefstride));
-        T12 = (*(__m64*)(fref3 + 2 * frefstride));
-        T13 = (*(__m64*)(fref3 + 3 * frefstride));
-        T14 = (*(__m64*)(fref3 + 4 * frefstride));
-        T15 = (*(__m64*)(fref3 + 5 * frefstride));
-        T16 = (*(__m64*)(fref3 + 6 * frefstride));
-        T17 = (*(__m64*)(fref3 + 7 * frefstride));
-
-        T20 = _mm_sad_pu8(T00, T10);
-        T21 = _mm_sad_pu8(T01, T11);
-        T22 = _mm_sad_pu8(T02, T12);
-        T23 = _mm_sad_pu8(T03, T13);
-        T24 = _mm_sad_pu8(T04, T14);
-        T25 = _mm_sad_pu8(T05, T15);
-        T26 = _mm_sad_pu8(T06, T16);
-        T27 = _mm_sad_pu8(T07, T17);
-
-        sum2 = _mm_add_pi16(sum2, T20);
-        sum2 = _mm_add_pi16(sum2, T21);
-        sum2 = _mm_add_pi16(sum2, T22);
-        sum2 = _mm_add_pi16(sum2, T23);
-        sum2 = _mm_add_pi16(sum2, T24);
-        sum2 = _mm_add_pi16(sum2, T25);
-        sum2 = _mm_add_pi16(sum2, T26);
-        sum2 = _mm_add_pi16(sum2, T27);
-
-        T10 = (*(__m64*)(fref4 + 0 * frefstride));
-        T11 = (*(__m64*)(fref4 + 1 * frefstride));
-        T12 = (*(__m64*)(fref4 + 2 * frefstride));
-        T13 = (*(__m64*)(fref4 + 3 * frefstride));
-        T14 = (*(__m64*)(fref4 + 4 * frefstride));
-        T15 = (*(__m64*)(fref4 + 5 * frefstride));
-        T16 = (*(__m64*)(fref4 + 6 * frefstride));
-        T17 = (*(__m64*)(fref4 + 7 * frefstride));
-
-        T20 = _mm_sad_pu8(T00, T10);
-        T21 = _mm_sad_pu8(T01, T11);
-        T22 = _mm_sad_pu8(T02, T12);
-        T23 = _mm_sad_pu8(T03, T13);
-        T24 = _mm_sad_pu8(T04, T14);
-        T25 = _mm_sad_pu8(T05, T15);
-        T26 = _mm_sad_pu8(T06, T16);
-        T27 = _mm_sad_pu8(T07, T17);
-
-        sum3 = _mm_add_pi16(sum3, T20);
-        sum3 = _mm_add_pi16(sum3, T21);
-        sum3 = _mm_add_pi16(sum3, T22);
-        sum3 = _mm_add_pi16(sum3, T23);
-        sum3 = _mm_add_pi16(sum3, T24);
-        sum3 = _mm_add_pi16(sum3, T25);
-        sum3 = _mm_add_pi16(sum3, T26);
-        sum3 = _mm_add_pi16(sum3, T27);
-    }
-    else if (ly == 16)
-    {
-        T00 = (*(__m64*)(fenc + 0 * FENC_STRIDE));
-        T01 = (*(__m64*)(fenc + 1 * FENC_STRIDE));
-        T02 = (*(__m64*)(fenc + 2 * FENC_STRIDE));
-        T03 = (*(__m64*)(fenc + 3 * FENC_STRIDE));
-        T04 = (*(__m64*)(fenc + 4 * FENC_STRIDE));
-        T05 = (*(__m64*)(fenc + 5 * FENC_STRIDE));
-        T06 = (*(__m64*)(fenc + 6 * FENC_STRIDE));
-        T07 = (*(__m64*)(fenc + 7 * FENC_STRIDE));
-        T0 = (*(__m64*)(fenc + 8 * FENC_STRIDE));
-        T1 = (*(__m64*)(fenc + 9 * FENC_STRIDE));
-        T2 = (*(__m64*)(fenc + 10 * FENC_STRIDE));
-        T3 = (*(__m64*)(fenc + 11 * FENC_STRIDE));
-        T4 = (*(__m64*)(fenc + 12 * FENC_STRIDE));
-        T5 = (*(__m64*)(fenc + 13 * FENC_STRIDE));
-        T6 = (*(__m64*)(fenc + 14 * FENC_STRIDE));
-        T7 = (*(__m64*)(fenc + 15 * FENC_STRIDE));
-
-        T10 = (*(__m64*)(fref1 + 0 * frefstride));
-        T11 = (*(__m64*)(fref1 + 1 * frefstride));
-        T12 = (*(__m64*)(fref1 + 2 * frefstride));
-        T13 = (*(__m64*)(fref1 + 3 * frefstride));
-        T14 = (*(__m64*)(fref1 + 4 * frefstride));
-        T15 = (*(__m64*)(fref1 + 5 * frefstride));
-        T16 = (*(__m64*)(fref1 + 6 * frefstride));
-        T17 = (*(__m64*)(fref1 + 7 * frefstride));
-
-        T20 = _mm_sad_pu8(T00, T10);
-        T21 = _mm_sad_pu8(T01, T11);
-        T22 = _mm_sad_pu8(T02, T12);
-        T23 = _mm_sad_pu8(T03, T13);
-        T24 = _mm_sad_pu8(T04, T14);
-        T25 = _mm_sad_pu8(T05, T15);
-        T26 = _mm_sad_pu8(T06, T16);
-        T27 = _mm_sad_pu8(T07, T17);
-
-        sum0 = _mm_add_pi16(sum0, T20);
-        sum0 = _mm_add_pi16(sum0, T21);
-        sum0 = _mm_add_pi16(sum0, T22);
-        sum0 = _mm_add_pi16(sum0, T23);
-        sum0 = _mm_add_pi16(sum0, T24);
-        sum0 = _mm_add_pi16(sum0, T25);
-        sum0 = _mm_add_pi16(sum0, T26);
-        sum0 = _mm_add_pi16(sum0, T27);
-
-        T10 = (*(__m64*)(fref1 + 8 * frefstride));
-        T11 = (*(__m64*)(fref1 + 9 * frefstride));
-        T12 = (*(__m64*)(fref1 + 10 * frefstride));
-        T13 = (*(__m64*)(fref1 + 11 * frefstride));
-        T14 = (*(__m64*)(fref1 + 12 * frefstride));
-        T15 = (*(__m64*)(fref1 + 13 * frefstride));
-        T16 = (*(__m64*)(fref1 + 14 * frefstride));
-        T17 = (*(__m64*)(fref1 + 15 * frefstride));
-
-        T20 = _mm_sad_pu8(T0, T10);
-        T21 = _mm_sad_pu8(T1, T11);
-        T22 = _mm_sad_pu8(T2, T12);
-        T23 = _mm_sad_pu8(T3, T13);
-        T24 = _mm_sad_pu8(T4, T14);
-        T25 = _mm_sad_pu8(T5, T15);
-        T26 = _mm_sad_pu8(T6, T16);
-        T27 = _mm_sad_pu8(T7, T17);
-
-        sum0 = _mm_add_pi16(sum0, T20);
-        sum0 = _mm_add_pi16(sum0, T21);
-        sum0 = _mm_add_pi16(sum0, T22);
-        sum0 = _mm_add_pi16(sum0, T23);
-        sum0 = _mm_add_pi16(sum0, T24);
-        sum0 = _mm_add_pi16(sum0, T25);
-        sum0 = _mm_add_pi16(sum0, T26);
-        sum0 = _mm_add_pi16(sum0, T27);
-
-        T10 = (*(__m64*)(fref2 + 0 * frefstride));
-        T11 = (*(__m64*)(fref2 + 1 * frefstride));
-        T12 = (*(__m64*)(fref2 + 2 * frefstride));
-        T13 = (*(__m64*)(fref2 + 3 * frefstride));
-        T14 = (*(__m64*)(fref2 + 4 * frefstride));
-        T15 = (*(__m64*)(fref2 + 5 * frefstride));
-        T16 = (*(__m64*)(fref2 + 6 * frefstride));
-        T17 = (*(__m64*)(fref2 + 7 * frefstride));
-
-        T20 = _mm_sad_pu8(T00, T10);
-        T21 = _mm_sad_pu8(T01, T11);
-        T22 = _mm_sad_pu8(T02, T12);
-        T23 = _mm_sad_pu8(T03, T13);
-        T24 = _mm_sad_pu8(T04, T14);
-        T25 = _mm_sad_pu8(T05, T15);
-        T26 = _mm_sad_pu8(T06, T16);
-        T27 = _mm_sad_pu8(T07, T17);
-
-        sum1 = _mm_add_pi16(sum1, T20);
-        sum1 = _mm_add_pi16(sum1, T21);
-        sum1 = _mm_add_pi16(sum1, T22);
-        sum1 = _mm_add_pi16(sum1, T23);
-        sum1 = _mm_add_pi16(sum1, T24);
-        sum1 = _mm_add_pi16(sum1, T25);
-        sum1 = _mm_add_pi16(sum1, T26);
-        sum1 = _mm_add_pi16(sum1, T27);
-
-        T10 = (*(__m64*)(fref2 + 8 * frefstride));
-        T11 = (*(__m64*)(fref2 + 9 * frefstride));
-        T12 = (*(__m64*)(fref2 + 10 * frefstride));
-        T13 = (*(__m64*)(fref2 + 11 * frefstride));
-        T14 = (*(__m64*)(fref2 + 12 * frefstride));
-        T15 = (*(__m64*)(fref2 + 13 * frefstride));
-        T16 = (*(__m64*)(fref2 + 14 * frefstride));
-        T17 = (*(__m64*)(fref2 + 15 * frefstride));
-
-        T20 = _mm_sad_pu8(T0, T10);
-        T21 = _mm_sad_pu8(T1, T11);
-        T22 = _mm_sad_pu8(T2, T12);
-        T23 = _mm_sad_pu8(T3, T13);
-        T24 = _mm_sad_pu8(T4, T14);
-        T25 = _mm_sad_pu8(T5, T15);
-        T26 = _mm_sad_pu8(T6, T16);
-        T27 = _mm_sad_pu8(T7, T17);
-
-        sum1 = _mm_add_pi16(sum1, T20);
-        sum1 = _mm_add_pi16(sum1, T21);
-        sum1 = _mm_add_pi16(sum1, T22);
-        sum1 = _mm_add_pi16(sum1, T23);
-        sum1 = _mm_add_pi16(sum1, T24);
-        sum1 = _mm_add_pi16(sum1, T25);
-        sum1 = _mm_add_pi16(sum1, T26);
-        sum1 = _mm_add_pi16(sum1, T27);
-
-        T10 = (*(__m64*)(fref3 + 0 * frefstride));
-        T11 = (*(__m64*)(fref3 + 1 * frefstride));
-        T12 = (*(__m64*)(fref3 + 2 * frefstride));
-        T13 = (*(__m64*)(fref3 + 3 * frefstride));
-        T14 = (*(__m64*)(fref3 + 4 * frefstride));
-        T15 = (*(__m64*)(fref3 + 5 * frefstride));
-        T16 = (*(__m64*)(fref3 + 6 * frefstride));
-        T17 = (*(__m64*)(fref3 + 7 * frefstride));
-
-        T20 = _mm_sad_pu8(T00, T10);
-        T21 = _mm_sad_pu8(T01, T11);
-        T22 = _mm_sad_pu8(T02, T12);
-        T23 = _mm_sad_pu8(T03, T13);
-        T24 = _mm_sad_pu8(T04, T14);
-        T25 = _mm_sad_pu8(T05, T15);
-        T26 = _mm_sad_pu8(T06, T16);
-        T27 = _mm_sad_pu8(T07, T17);
-
-        sum2 = _mm_add_pi16(sum2, T20);
-        sum2 = _mm_add_pi16(sum2, T21);
-        sum2 = _mm_add_pi16(sum2, T22);
-        sum2 = _mm_add_pi16(sum2, T23);
-        sum2 = _mm_add_pi16(sum2, T24);
-        sum2 = _mm_add_pi16(sum2, T25);
-        sum2 = _mm_add_pi16(sum2, T26);
-        sum2 = _mm_add_pi16(sum2, T27);
-
-        T10 = (*(__m64*)(fref3 + 8 * frefstride));
-        T11 = (*(__m64*)(fref3 + 9 * frefstride));
-        T12 = (*(__m64*)(fref3 + 10 * frefstride));
-        T13 = (*(__m64*)(fref3 + 11 * frefstride));
-        T14 = (*(__m64*)(fref3 + 12 * frefstride));
-        T15 = (*(__m64*)(fref3 + 13 * frefstride));
-        T16 = (*(__m64*)(fref3 + 14 * frefstride));
-        T17 = (*(__m64*)(fref3 + 15 * frefstride));
-
-        T20 = _mm_sad_pu8(T0, T10);
-        T21 = _mm_sad_pu8(T1, T11);
-        T22 = _mm_sad_pu8(T2, T12);
-        T23 = _mm_sad_pu8(T3, T13);
-        T24 = _mm_sad_pu8(T4, T14);
-        T25 = _mm_sad_pu8(T5, T15);
-        T26 = _mm_sad_pu8(T6, T16);
-        T27 = _mm_sad_pu8(T7, T17);
-
-        sum2 = _mm_add_pi16(sum2, T20);
-        sum2 = _mm_add_pi16(sum2, T21);
-        sum2 = _mm_add_pi16(sum2, T22);
-        sum2 = _mm_add_pi16(sum2, T23);
-        sum2 = _mm_add_pi16(sum2, T24);
-        sum2 = _mm_add_pi16(sum2, T25);
-        sum2 = _mm_add_pi16(sum2, T26);
-        sum2 = _mm_add_pi16(sum2, T27);
 
         T10 = (*(__m64*)(fref4 + 0 * frefstride));
         T11 = (*(__m64*)(fref4 + 1 * frefstride));


