[x265] [PATCH] pixel8.inc: Optimization with sad_x4 4xn
praveen at multicorewareinc.com
Mon Sep 2 09:10:26 CEST 2013
# HG changeset patch
# User praveen Tiwari
# Date 1378105815 -19800
# Node ID f42c5db55c77e7ea197c323e41c69c237fb00f21
# Parent 1a632b2e0fc7a3256202d3afd7dc187042081607
pixel8.inc: Optimization with sad_x4 4xn
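The idea behind the 8xN/16xN paths: instead of issuing one _mm_sad_pu8 per 4-pixel row, pack four rows of the 4-wide block into a single XMM register so that one _mm_sad_epu8 covers sixteen pixels at once; its two 64-bit partial sums are then folded together with a dword shuffle. For a single 4x4 block the core reduction looks roughly like the sketch below (a minimal illustration only; the function and parameter names are hypothetical, not taken from pixel8.inc):

    #include <emmintrin.h>  /* SSE2 */

    static inline int sad_4x4_sse2(const unsigned char* enc, int encStride,
                                   const unsigned char* ref, int refStride)
    {
        /* Bring each 4-byte row into the low dword of an XMM register,
         * then interleave dwords and qwords so all 16 block pixels end
         * up in one register: [row0 | row1 | row2 | row3]. */
        __m128i e01 = _mm_unpacklo_epi32(
            _mm_cvtsi32_si128(*(const int*)(enc)),
            _mm_cvtsi32_si128(*(const int*)(enc + encStride)));
        __m128i e23 = _mm_unpacklo_epi32(
            _mm_cvtsi32_si128(*(const int*)(enc + 2 * encStride)),
            _mm_cvtsi32_si128(*(const int*)(enc + 3 * encStride)));
        __m128i e = _mm_unpacklo_epi64(e01, e23);

        /* Pack the reference rows in exactly the same order so byte
         * correspondence is preserved. */
        __m128i r01 = _mm_unpacklo_epi32(
            _mm_cvtsi32_si128(*(const int*)(ref)),
            _mm_cvtsi32_si128(*(const int*)(ref + refStride)));
        __m128i r23 = _mm_unpacklo_epi32(
            _mm_cvtsi32_si128(*(const int*)(ref + 2 * refStride)),
            _mm_cvtsi32_si128(*(const int*)(ref + 3 * refStride)));
        __m128i r = _mm_unpacklo_epi64(r01, r23);

        /* One PSADBW covers the whole 4x4 block; it leaves two 64-bit
         * partial sums (bytes 0-7 and 8-15). Shuffle the high partial
         * down (selector 2 picks dword 2) and add to get the total. */
        __m128i sad = _mm_sad_epu8(e, r);
        sad = _mm_add_epi32(sad, _mm_shuffle_epi32(sad, 2));
        return _mm_cvtsi128_si32(sad);
    }

The ly == 4 path stays on MMX but applies the same pairing at half width: _mm_unpacklo_pi8 interleaves two rows into one __m64 (identically for fenc and fref), so each _mm_sad_pu8 accounts for two rows and the per-reference SAD count drops from four to two.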
diff -r 1a632b2e0fc7 -r f42c5db55c77 source/common/vec/pixel8.inc
--- a/source/common/vec/pixel8.inc Mon Sep 02 11:58:04 2013 +0530
+++ b/source/common/vec/pixel8.inc Mon Sep 02 12:40:15 2013 +0530
@@ -4377,634 +4377,644 @@
{
assert((ly % 4) == 0);
- __m64 sum0 = _mm_setzero_si64();
- __m64 sum1 = _mm_setzero_si64();
- __m64 sum2 = _mm_setzero_si64();
- __m64 sum3 = _mm_setzero_si64();
-
- __m64 T00, T01, T02, T03, T04, T05, T06, T07;
- __m64 T0, T1, T2, T3, T4, T5, T6, T7;
- __m64 T10, T11, T12, T13, T14, T15, T16, T17;
- __m64 T20, T21, T22, T23, T24, T25, T26, T27;
-
if (ly == 4)
{
- T00 = _mm_cvtsi32_si64(*(int*)(fenc + 0 * FENC_STRIDE));
- T01 = _mm_cvtsi32_si64(*(int*)(fenc + 1 * FENC_STRIDE));
- T02 = _mm_cvtsi32_si64(*(int*)(fenc + 2 * FENC_STRIDE));
+ __m64 sum0, sum1, sum2, sum3;
+ __m64 T10, T11, T12, T13;
+ __m64 T00, T01, T02, T03;
+ __m64 T20, T21;
+
+ T00 = _mm_cvtsi32_si64(*(int*)(fenc));
+ T01 = _mm_cvtsi32_si64(*(int*)(fenc + FENC_STRIDE));
+ T00 = _mm_unpacklo_pi8(T00, T01);
+ T02 = _mm_cvtsi32_si64(*(int*)(fenc + (FENC_STRIDE << 1)));
T03 = _mm_cvtsi32_si64(*(int*)(fenc + 3 * FENC_STRIDE));
-
- T10 = _mm_cvtsi32_si64(*(int*)(fref1 + 0 * frefstride));
- T11 = _mm_cvtsi32_si64(*(int*)(fref1 + 1 * frefstride));
+ T02 = _mm_unpacklo_pi8(T02, T03);
+
+ T10 = _mm_cvtsi32_si64(*(int*)(fref1));
+ T11 = _mm_cvtsi32_si64(*(int*)(fref1 + frefstride));
+ T10 = _mm_unpacklo_pi8(T10, T11);
T12 = _mm_cvtsi32_si64(*(int*)(fref1 + 2 * frefstride));
T13 = _mm_cvtsi32_si64(*(int*)(fref1 + 3 * frefstride));
+ T12 = _mm_unpacklo_pi8(T12, T13);
T20 = _mm_sad_pu8(T00, T10);
- T21 = _mm_sad_pu8(T01, T11);
- T22 = _mm_sad_pu8(T02, T12);
- T23 = _mm_sad_pu8(T03, T13);
-
- sum0 = _mm_add_pi16(sum0, T20);
- sum0 = _mm_add_pi16(sum0, T21);
- sum0 = _mm_add_pi16(sum0, T22);
- sum0 = _mm_add_pi16(sum0, T23);
-
- T10 = _mm_cvtsi32_si64(*(int*)(fref2 + 0 * frefstride));
- T11 = _mm_cvtsi32_si64(*(int*)(fref2 + 1 * frefstride));
+ T21 = _mm_sad_pu8(T02, T12);
+
+ sum0 = _mm_add_pi16(T20, T21);
+
+ T10 = _mm_cvtsi32_si64(*(int*)(fref2));
+ T11 = _mm_cvtsi32_si64(*(int*)(fref2 + frefstride));
+ T10 = _mm_unpacklo_pi8(T10, T11);
T12 = _mm_cvtsi32_si64(*(int*)(fref2 + 2 * frefstride));
T13 = _mm_cvtsi32_si64(*(int*)(fref2 + 3 * frefstride));
+ T12 = _mm_unpacklo_pi8(T12, T13);
T20 = _mm_sad_pu8(T00, T10);
- T21 = _mm_sad_pu8(T01, T11);
- T22 = _mm_sad_pu8(T02, T12);
- T23 = _mm_sad_pu8(T03, T13);
-
- sum1 = _mm_add_pi16(sum1, T20);
- sum1 = _mm_add_pi16(sum1, T21);
- sum1 = _mm_add_pi16(sum1, T22);
- sum1 = _mm_add_pi16(sum1, T23);
-
- T10 = _mm_cvtsi32_si64(*(int*)(fref3 + 0 * frefstride));
- T11 = _mm_cvtsi32_si64(*(int*)(fref3 + 1 * frefstride));
+ T21 = _mm_sad_pu8(T02, T12);
+
+ sum1 = _mm_add_pi16(T20, T21);
+
+ T10 = _mm_cvtsi32_si64(*(int*)(fref3));
+ T11 = _mm_cvtsi32_si64(*(int*)(fref3 + frefstride));
+ T10 = _mm_unpacklo_pi8(T10, T11);
T12 = _mm_cvtsi32_si64(*(int*)(fref3 + 2 * frefstride));
T13 = _mm_cvtsi32_si64(*(int*)(fref3 + 3 * frefstride));
+ T12 = _mm_unpacklo_pi8(T12, T13);
T20 = _mm_sad_pu8(T00, T10);
- T21 = _mm_sad_pu8(T01, T11);
- T22 = _mm_sad_pu8(T02, T12);
- T23 = _mm_sad_pu8(T03, T13);
-
- sum2 = _mm_add_pi16(sum2, T20);
- sum2 = _mm_add_pi16(sum2, T21);
- sum2 = _mm_add_pi16(sum2, T22);
- sum2 = _mm_add_pi16(sum2, T23);
-
- T10 = _mm_cvtsi32_si64(*(int*)(fref4 + 0 * frefstride));
- T11 = _mm_cvtsi32_si64(*(int*)(fref4 + 1 * frefstride));
+ T21 = _mm_sad_pu8(T02, T12);
+
+ sum2 = _mm_add_pi16(T20, T21);
+
+ T10 = _mm_cvtsi32_si64(*(int*)(fref4));
+ T11 = _mm_cvtsi32_si64(*(int*)(fref4 + frefstride));
+ T10 = _mm_unpacklo_pi8(T10, T11);
T12 = _mm_cvtsi32_si64(*(int*)(fref4 + 2 * frefstride));
T13 = _mm_cvtsi32_si64(*(int*)(fref4 + 3 * frefstride));
+ T12 = _mm_unpacklo_pi8(T12, T13);
T20 = _mm_sad_pu8(T00, T10);
- T21 = _mm_sad_pu8(T01, T11);
- T22 = _mm_sad_pu8(T02, T12);
- T23 = _mm_sad_pu8(T03, T13);
-
- sum3 = _mm_add_pi16(sum3, T20);
- sum3 = _mm_add_pi16(sum3, T21);
- sum3 = _mm_add_pi16(sum3, T22);
- sum3 = _mm_add_pi16(sum3, T23);
+ T21 = _mm_sad_pu8(T02, T12);
+
+ sum3 = _mm_add_pi16(T20, T21);
+
+ res[0] = _m_to_int(sum0);
+ res[1] = _m_to_int(sum1);
+ res[2] = _m_to_int(sum2);
+ res[3] = _m_to_int(sum3);
}
else if (ly == 8)
{
- T00 = _mm_cvtsi32_si64(*(int*)(fenc + 0 * FENC_STRIDE));
- T01 = _mm_cvtsi32_si64(*(int*)(fenc + 1 * FENC_STRIDE));
- T02 = _mm_cvtsi32_si64(*(int*)(fenc + 2 * FENC_STRIDE));
- T03 = _mm_cvtsi32_si64(*(int*)(fenc + 3 * FENC_STRIDE));
- T04 = _mm_cvtsi32_si64(*(int*)(fenc + 4 * FENC_STRIDE));
- T05 = _mm_cvtsi32_si64(*(int*)(fenc + 5 * FENC_STRIDE));
- T06 = _mm_cvtsi32_si64(*(int*)(fenc + 6 * FENC_STRIDE));
- T07 = _mm_cvtsi32_si64(*(int*)(fenc + 7 * FENC_STRIDE));
-
- T10 = _mm_cvtsi32_si64(*(int*)(fref1 + 0 * frefstride));
- T11 = _mm_cvtsi32_si64(*(int*)(fref1 + 1 * frefstride));
- T12 = _mm_cvtsi32_si64(*(int*)(fref1 + 2 * frefstride));
- T13 = _mm_cvtsi32_si64(*(int*)(fref1 + 3 * frefstride));
- T14 = _mm_cvtsi32_si64(*(int*)(fref1 + 4 * frefstride));
- T15 = _mm_cvtsi32_si64(*(int*)(fref1 + 5 * frefstride));
- T16 = _mm_cvtsi32_si64(*(int*)(fref1 + 6 * frefstride));
- T17 = _mm_cvtsi32_si64(*(int*)(fref1 + 7 * frefstride));
-
- T20 = _mm_sad_pu8(T00, T10);
- T21 = _mm_sad_pu8(T01, T11);
- T22 = _mm_sad_pu8(T02, T12);
- T23 = _mm_sad_pu8(T03, T13);
- T24 = _mm_sad_pu8(T04, T14);
- T25 = _mm_sad_pu8(T05, T15);
- T26 = _mm_sad_pu8(T06, T16);
- T27 = _mm_sad_pu8(T07, T17);
-
- sum0 = _mm_add_pi16(sum0, T20);
- sum0 = _mm_add_pi16(sum0, T21);
- sum0 = _mm_add_pi16(sum0, T22);
- sum0 = _mm_add_pi16(sum0, T23);
- sum0 = _mm_add_pi16(sum0, T24);
- sum0 = _mm_add_pi16(sum0, T25);
- sum0 = _mm_add_pi16(sum0, T26);
- sum0 = _mm_add_pi16(sum0, T27);
-
- T10 = _mm_cvtsi32_si64(*(int*)(fref2 + 0 * frefstride));
- T11 = _mm_cvtsi32_si64(*(int*)(fref2 + 1 * frefstride));
- T12 = _mm_cvtsi32_si64(*(int*)(fref2 + 2 * frefstride));
- T13 = _mm_cvtsi32_si64(*(int*)(fref2 + 3 * frefstride));
- T14 = _mm_cvtsi32_si64(*(int*)(fref2 + 4 * frefstride));
- T15 = _mm_cvtsi32_si64(*(int*)(fref2 + 5 * frefstride));
- T16 = _mm_cvtsi32_si64(*(int*)(fref2 + 6 * frefstride));
- T17 = _mm_cvtsi32_si64(*(int*)(fref2 + 7 * frefstride));
-
- T20 = _mm_sad_pu8(T00, T10);
- T21 = _mm_sad_pu8(T01, T11);
- T22 = _mm_sad_pu8(T02, T12);
- T23 = _mm_sad_pu8(T03, T13);
- T24 = _mm_sad_pu8(T04, T14);
- T25 = _mm_sad_pu8(T05, T15);
- T26 = _mm_sad_pu8(T06, T16);
- T27 = _mm_sad_pu8(T07, T17);
-
- sum1 = _mm_add_pi16(sum1, T20);
- sum1 = _mm_add_pi16(sum1, T21);
- sum1 = _mm_add_pi16(sum1, T22);
- sum1 = _mm_add_pi16(sum1, T23);
- sum1 = _mm_add_pi16(sum1, T24);
- sum1 = _mm_add_pi16(sum1, T25);
- sum1 = _mm_add_pi16(sum1, T26);
- sum1 = _mm_add_pi16(sum1, T27);
-
- T10 = _mm_cvtsi32_si64(*(int*)(fref3 + 0 * frefstride));
- T11 = _mm_cvtsi32_si64(*(int*)(fref3 + 1 * frefstride));
- T12 = _mm_cvtsi32_si64(*(int*)(fref3 + 2 * frefstride));
- T13 = _mm_cvtsi32_si64(*(int*)(fref3 + 3 * frefstride));
- T14 = _mm_cvtsi32_si64(*(int*)(fref3 + 4 * frefstride));
- T15 = _mm_cvtsi32_si64(*(int*)(fref3 + 5 * frefstride));
- T16 = _mm_cvtsi32_si64(*(int*)(fref3 + 6 * frefstride));
- T17 = _mm_cvtsi32_si64(*(int*)(fref3 + 7 * frefstride));
-
- T20 = _mm_sad_pu8(T00, T10);
- T21 = _mm_sad_pu8(T01, T11);
- T22 = _mm_sad_pu8(T02, T12);
- T23 = _mm_sad_pu8(T03, T13);
- T24 = _mm_sad_pu8(T04, T14);
- T25 = _mm_sad_pu8(T05, T15);
- T26 = _mm_sad_pu8(T06, T16);
- T27 = _mm_sad_pu8(T07, T17);
-
- sum2 = _mm_add_pi16(sum2, T20);
- sum2 = _mm_add_pi16(sum2, T21);
- sum2 = _mm_add_pi16(sum2, T22);
- sum2 = _mm_add_pi16(sum2, T23);
- sum2 = _mm_add_pi16(sum2, T24);
- sum2 = _mm_add_pi16(sum2, T25);
- sum2 = _mm_add_pi16(sum2, T26);
- sum2 = _mm_add_pi16(sum2, T27);
-
- T10 = _mm_cvtsi32_si64(*(int*)(fref4 + 0 * frefstride));
- T11 = _mm_cvtsi32_si64(*(int*)(fref4 + 1 * frefstride));
- T12 = _mm_cvtsi32_si64(*(int*)(fref4 + 2 * frefstride));
- T13 = _mm_cvtsi32_si64(*(int*)(fref4 + 3 * frefstride));
- T14 = _mm_cvtsi32_si64(*(int*)(fref4 + 4 * frefstride));
- T15 = _mm_cvtsi32_si64(*(int*)(fref4 + 5 * frefstride));
- T16 = _mm_cvtsi32_si64(*(int*)(fref4 + 6 * frefstride));
- T17 = _mm_cvtsi32_si64(*(int*)(fref4 + 7 * frefstride));
-
- T20 = _mm_sad_pu8(T00, T10);
- T21 = _mm_sad_pu8(T01, T11);
- T22 = _mm_sad_pu8(T02, T12);
- T23 = _mm_sad_pu8(T03, T13);
- T24 = _mm_sad_pu8(T04, T14);
- T25 = _mm_sad_pu8(T05, T15);
- T26 = _mm_sad_pu8(T06, T16);
- T27 = _mm_sad_pu8(T07, T17);
-
- sum3 = _mm_add_pi16(sum3, T20);
- sum3 = _mm_add_pi16(sum3, T21);
- sum3 = _mm_add_pi16(sum3, T22);
- sum3 = _mm_add_pi16(sum3, T23);
- sum3 = _mm_add_pi16(sum3, T24);
- sum3 = _mm_add_pi16(sum3, T25);
- sum3 = _mm_add_pi16(sum3, T26);
- sum3 = _mm_add_pi16(sum3, T27);
+ __m128i sum0, sum1, sum2, sum3;
+
+ __m128i T00, T01, T02, T03;
+ __m128i T10, T11, T12, T13;
+ __m128i R00, R01, R02, R03, R04;
+ __m128i T20;
+
+ T00 = _mm_loadl_epi64((__m128i*)(fenc));
+ T01 = _mm_loadl_epi64((__m128i*)(fenc + FENC_STRIDE));
+ T01 = _mm_unpacklo_epi32(T00, T01);
+ T02 = _mm_loadl_epi64((__m128i*)(fenc + (2) * FENC_STRIDE));
+ T03 = _mm_loadl_epi64((__m128i*)(fenc + (3) * FENC_STRIDE));
+ T03 = _mm_unpacklo_epi32(T02, T03);
+ R00 = _mm_unpacklo_epi64(T01, T03);
+
+ T10 = _mm_loadl_epi64((__m128i*)(fref1));
+ T11 = _mm_loadl_epi64((__m128i*)(fref1 + frefstride));
+ T11 = _mm_unpacklo_epi32(T10, T11);
+ T12 = _mm_loadl_epi64((__m128i*)(fref1 + 2 * frefstride));
+ T13 = _mm_loadl_epi64((__m128i*)(fref1 + 3 * frefstride));
+ T13 = _mm_unpacklo_epi32(T12, T13);
+ R01 = _mm_unpacklo_epi64(T11, T13);
+
+ T10 = _mm_loadl_epi64((__m128i*)(fref2));
+ T11 = _mm_loadl_epi64((__m128i*)(fref2 + frefstride));
+ T11 = _mm_unpacklo_epi32(T10, T11);
+ T12 = _mm_loadl_epi64((__m128i*)(fref2 + 2 * frefstride));
+ T13 = _mm_loadl_epi64((__m128i*)(fref2 + 3 * frefstride));
+ T13 = _mm_unpacklo_epi32(T12, T13);
+ R02 = _mm_unpacklo_epi64(T11, T13);
+
+ T10 = _mm_loadl_epi64((__m128i*)(fref3));
+ T11 = _mm_loadl_epi64((__m128i*)(fref3 + frefstride));
+ T11 = _mm_unpacklo_epi32(T10, T11);
+ T12 = _mm_loadl_epi64((__m128i*)(fref3 + 2 * frefstride));
+ T13 = _mm_loadl_epi64((__m128i*)(fref3 + 3 * frefstride));
+ T13 = _mm_unpacklo_epi32(T12, T13);
+ R03 = _mm_unpacklo_epi64(T11, T13);
+
+ T10 = _mm_loadl_epi64((__m128i*)(fref4));
+ T11 = _mm_loadl_epi64((__m128i*)(fref4 + frefstride));
+ T11 = _mm_unpacklo_epi32(T10, T11);
+ T12 = _mm_loadl_epi64((__m128i*)(fref4 + 2 * frefstride));
+ T13 = _mm_loadl_epi64((__m128i*)(fref4 + 3 * frefstride));
+ T13 = _mm_unpacklo_epi32(T12, T13);
+ R04 = _mm_unpacklo_epi64(T11, T13);
+
+ T20 = _mm_sad_epu8(R00, R01);
+ sum0 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+
+ T20 = _mm_sad_epu8(R00, R02);
+ sum1 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+
+ T20 = _mm_sad_epu8(R00, R03);
+ sum2 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+
+ T20 = _mm_sad_epu8(R00, R04);
+ sum3 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+
+ T00 = _mm_loadl_epi64((__m128i*)(fenc + (4) * FENC_STRIDE));
+ T01 = _mm_loadl_epi64((__m128i*)(fenc + (5) * FENC_STRIDE));
+ T01 = _mm_unpacklo_epi32(T00, T01);
+ T02 = _mm_loadl_epi64((__m128i*)(fenc + (6) * FENC_STRIDE));
+ T03 = _mm_loadl_epi64((__m128i*)(fenc + (7) * FENC_STRIDE));
+ T03 = _mm_unpacklo_epi32(T02, T03);
+ R00 = _mm_unpacklo_epi64(T01, T03);
+
+ T10 = _mm_loadl_epi64((__m128i*)(fref1 + 4 * frefstride));
+ T11 = _mm_loadl_epi64((__m128i*)(fref1 + 5 * frefstride));
+ T11 = _mm_unpacklo_epi32(T10, T11);
+ T12 = _mm_loadl_epi64((__m128i*)(fref1 + 6 * frefstride));
+ T13 = _mm_loadl_epi64((__m128i*)(fref1 + 7 * frefstride));
+ T13 = _mm_unpacklo_epi32(T12, T13);
+ R01 = _mm_unpacklo_epi64(T11, T13);
+
+ T10 = _mm_loadl_epi64((__m128i*)(fref2 + 4 * frefstride));
+ T11 = _mm_loadl_epi64((__m128i*)(fref2 + 5 * frefstride));
+ T11 = _mm_unpacklo_epi32(T10, T11);
+ T12 = _mm_loadl_epi64((__m128i*)(fref2 + 6 * frefstride));
+ T13 = _mm_loadl_epi64((__m128i*)(fref2 + 7 * frefstride));
+ T13 = _mm_unpacklo_epi32(T12, T13);
+ R02 = _mm_unpacklo_epi64(T11, T13);
+
+ T10 = _mm_loadl_epi64((__m128i*)(fref3 + 4 * frefstride));
+ T11 = _mm_loadl_epi64((__m128i*)(fref3 + 5 * frefstride));
+ T11 = _mm_unpacklo_epi32(T10, T11);
+ T12 = _mm_loadl_epi64((__m128i*)(fref3 + 6 * frefstride));
+ T13 = _mm_loadl_epi64((__m128i*)(fref3 + 7 * frefstride));
+ T13 = _mm_unpacklo_epi32(T12, T13);
+ R03 = _mm_unpacklo_epi64(T11, T13);
+
+ T10 = _mm_loadl_epi64((__m128i*)(fref4 + 4 * frefstride));
+ T11 = _mm_loadl_epi64((__m128i*)(fref4 + 5 * frefstride));
+ T11 = _mm_unpacklo_epi32(T10, T11);
+ T12 = _mm_loadl_epi64((__m128i*)(fref4 + 6 * frefstride));
+ T13 = _mm_loadl_epi64((__m128i*)(fref4 + 7 * frefstride));
+ T13 = _mm_unpacklo_epi32(T12, T13);
+ R04 = _mm_unpacklo_epi64(T11, T13);
+
+ T20 = _mm_sad_epu8(R00, R01);
+ T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+ sum0 = _mm_add_epi32(sum0, T20);
+
+ T20 = _mm_sad_epu8(R00, R02);
+ T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+ sum1 = _mm_add_epi32(sum1, T20);
+
+ T20 = _mm_sad_epu8(R00, R03);
+ T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+ sum2 = _mm_add_epi32(sum2, T20);
+
+ T20 = _mm_sad_epu8(R00, R04);
+ T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+ sum3 = _mm_add_epi32(sum3, T20);
+
+ res[0] = _mm_cvtsi128_si32(sum0);
+ res[1] = _mm_cvtsi128_si32(sum1);
+ res[2] = _mm_cvtsi128_si32(sum2);
+ res[3] = _mm_cvtsi128_si32(sum3);
}
else if (ly == 16)
{
- T00 = _mm_cvtsi32_si64(*(int*)(fenc + 0 * FENC_STRIDE));
- T01 = _mm_cvtsi32_si64(*(int*)(fenc + 1 * FENC_STRIDE));
- T02 = _mm_cvtsi32_si64(*(int*)(fenc + 2 * FENC_STRIDE));
- T03 = _mm_cvtsi32_si64(*(int*)(fenc + 3 * FENC_STRIDE));
- T04 = _mm_cvtsi32_si64(*(int*)(fenc + 4 * FENC_STRIDE));
- T05 = _mm_cvtsi32_si64(*(int*)(fenc + 5 * FENC_STRIDE));
- T06 = _mm_cvtsi32_si64(*(int*)(fenc + 6 * FENC_STRIDE));
- T07 = _mm_cvtsi32_si64(*(int*)(fenc + 7 * FENC_STRIDE));
- T0 = _mm_cvtsi32_si64(*(int*)(fenc + 8 * FENC_STRIDE));
- T1 = _mm_cvtsi32_si64(*(int*)(fenc + 9 * FENC_STRIDE));
- T2 = _mm_cvtsi32_si64(*(int*)(fenc + 10 * FENC_STRIDE));
- T3 = _mm_cvtsi32_si64(*(int*)(fenc + 11 * FENC_STRIDE));
- T4 = _mm_cvtsi32_si64(*(int*)(fenc + 12 * FENC_STRIDE));
- T5 = _mm_cvtsi32_si64(*(int*)(fenc + 13 * FENC_STRIDE));
- T6 = _mm_cvtsi32_si64(*(int*)(fenc + 14 * FENC_STRIDE));
- T7 = _mm_cvtsi32_si64(*(int*)(fenc + 15 * FENC_STRIDE));
-
- T10 = _mm_cvtsi32_si64(*(int*)(fref1 + 0 * frefstride));
- T11 = _mm_cvtsi32_si64(*(int*)(fref1 + 1 * frefstride));
- T12 = _mm_cvtsi32_si64(*(int*)(fref1 + 2 * frefstride));
- T13 = _mm_cvtsi32_si64(*(int*)(fref1 + 3 * frefstride));
- T14 = _mm_cvtsi32_si64(*(int*)(fref1 + 4 * frefstride));
- T15 = _mm_cvtsi32_si64(*(int*)(fref1 + 5 * frefstride));
- T16 = _mm_cvtsi32_si64(*(int*)(fref1 + 6 * frefstride));
- T17 = _mm_cvtsi32_si64(*(int*)(fref1 + 7 * frefstride));
-
- T20 = _mm_sad_pu8(T00, T10);
- T21 = _mm_sad_pu8(T01, T11);
- T22 = _mm_sad_pu8(T02, T12);
- T23 = _mm_sad_pu8(T03, T13);
- T24 = _mm_sad_pu8(T04, T14);
- T25 = _mm_sad_pu8(T05, T15);
- T26 = _mm_sad_pu8(T06, T16);
- T27 = _mm_sad_pu8(T07, T17);
-
- sum0 = _mm_add_pi16(sum0, T20);
- sum0 = _mm_add_pi16(sum0, T21);
- sum0 = _mm_add_pi16(sum0, T22);
- sum0 = _mm_add_pi16(sum0, T23);
- sum0 = _mm_add_pi16(sum0, T24);
- sum0 = _mm_add_pi16(sum0, T25);
- sum0 = _mm_add_pi16(sum0, T26);
- sum0 = _mm_add_pi16(sum0, T27);
-
- T10 = _mm_cvtsi32_si64(*(int*)(fref1 + 8 * frefstride));
- T11 = _mm_cvtsi32_si64(*(int*)(fref1 + 9 * frefstride));
- T12 = _mm_cvtsi32_si64(*(int*)(fref1 + 10 * frefstride));
- T13 = _mm_cvtsi32_si64(*(int*)(fref1 + 11 * frefstride));
- T14 = _mm_cvtsi32_si64(*(int*)(fref1 + 12 * frefstride));
- T15 = _mm_cvtsi32_si64(*(int*)(fref1 + 13 * frefstride));
- T16 = _mm_cvtsi32_si64(*(int*)(fref1 + 14 * frefstride));
- T17 = _mm_cvtsi32_si64(*(int*)(fref1 + 15 * frefstride));
-
- T20 = _mm_sad_pu8(T0, T10);
- T21 = _mm_sad_pu8(T1, T11);
- T22 = _mm_sad_pu8(T2, T12);
- T23 = _mm_sad_pu8(T3, T13);
- T24 = _mm_sad_pu8(T4, T14);
- T25 = _mm_sad_pu8(T5, T15);
- T26 = _mm_sad_pu8(T6, T16);
- T27 = _mm_sad_pu8(T7, T17);
-
- sum0 = _mm_add_pi16(sum0, T20);
- sum0 = _mm_add_pi16(sum0, T21);
- sum0 = _mm_add_pi16(sum0, T22);
- sum0 = _mm_add_pi16(sum0, T23);
- sum0 = _mm_add_pi16(sum0, T24);
- sum0 = _mm_add_pi16(sum0, T25);
- sum0 = _mm_add_pi16(sum0, T26);
- sum0 = _mm_add_pi16(sum0, T27);
-
- T10 = _mm_cvtsi32_si64(*(int*)(fref2 + 0 * frefstride));
- T11 = _mm_cvtsi32_si64(*(int*)(fref2 + 1 * frefstride));
- T12 = _mm_cvtsi32_si64(*(int*)(fref2 + 2 * frefstride));
- T13 = _mm_cvtsi32_si64(*(int*)(fref2 + 3 * frefstride));
- T14 = _mm_cvtsi32_si64(*(int*)(fref2 + 4 * frefstride));
- T15 = _mm_cvtsi32_si64(*(int*)(fref2 + 5 * frefstride));
- T16 = _mm_cvtsi32_si64(*(int*)(fref2 + 6 * frefstride));
- T17 = _mm_cvtsi32_si64(*(int*)(fref2 + 7 * frefstride));
-
- T20 = _mm_sad_pu8(T00, T10);
- T21 = _mm_sad_pu8(T01, T11);
- T22 = _mm_sad_pu8(T02, T12);
- T23 = _mm_sad_pu8(T03, T13);
- T24 = _mm_sad_pu8(T04, T14);
- T25 = _mm_sad_pu8(T05, T15);
- T26 = _mm_sad_pu8(T06, T16);
- T27 = _mm_sad_pu8(T07, T17);
-
- sum1 = _mm_add_pi16(sum1, T20);
- sum1 = _mm_add_pi16(sum1, T21);
- sum1 = _mm_add_pi16(sum1, T22);
- sum1 = _mm_add_pi16(sum1, T23);
- sum1 = _mm_add_pi16(sum1, T24);
- sum1 = _mm_add_pi16(sum1, T25);
- sum1 = _mm_add_pi16(sum1, T26);
- sum1 = _mm_add_pi16(sum1, T27);
-
- T10 = _mm_cvtsi32_si64(*(int*)(fref2 + 8 * frefstride));
- T11 = _mm_cvtsi32_si64(*(int*)(fref2 + 9 * frefstride));
- T12 = _mm_cvtsi32_si64(*(int*)(fref2 + 10 * frefstride));
- T13 = _mm_cvtsi32_si64(*(int*)(fref2 + 11 * frefstride));
- T14 = _mm_cvtsi32_si64(*(int*)(fref2 + 12 * frefstride));
- T15 = _mm_cvtsi32_si64(*(int*)(fref2 + 13 * frefstride));
- T16 = _mm_cvtsi32_si64(*(int*)(fref2 + 14 * frefstride));
- T17 = _mm_cvtsi32_si64(*(int*)(fref2 + 15 * frefstride));
-
- T20 = _mm_sad_pu8(T0, T10);
- T21 = _mm_sad_pu8(T1, T11);
- T22 = _mm_sad_pu8(T2, T12);
- T23 = _mm_sad_pu8(T3, T13);
- T24 = _mm_sad_pu8(T4, T14);
- T25 = _mm_sad_pu8(T5, T15);
- T26 = _mm_sad_pu8(T6, T16);
- T27 = _mm_sad_pu8(T7, T17);
-
- sum1 = _mm_add_pi16(sum1, T20);
- sum1 = _mm_add_pi16(sum1, T21);
- sum1 = _mm_add_pi16(sum1, T22);
- sum1 = _mm_add_pi16(sum1, T23);
- sum1 = _mm_add_pi16(sum1, T24);
- sum1 = _mm_add_pi16(sum1, T25);
- sum1 = _mm_add_pi16(sum1, T26);
- sum1 = _mm_add_pi16(sum1, T27);
-
- T10 = _mm_cvtsi32_si64(*(int*)(fref3 + 0 * frefstride));
- T11 = _mm_cvtsi32_si64(*(int*)(fref3 + 1 * frefstride));
- T12 = _mm_cvtsi32_si64(*(int*)(fref3 + 2 * frefstride));
- T13 = _mm_cvtsi32_si64(*(int*)(fref3 + 3 * frefstride));
- T14 = _mm_cvtsi32_si64(*(int*)(fref3 + 4 * frefstride));
- T15 = _mm_cvtsi32_si64(*(int*)(fref3 + 5 * frefstride));
- T16 = _mm_cvtsi32_si64(*(int*)(fref3 + 6 * frefstride));
- T17 = _mm_cvtsi32_si64(*(int*)(fref3 + 7 * frefstride));
-
- T20 = _mm_sad_pu8(T00, T10);
- T21 = _mm_sad_pu8(T01, T11);
- T22 = _mm_sad_pu8(T02, T12);
- T23 = _mm_sad_pu8(T03, T13);
- T24 = _mm_sad_pu8(T04, T14);
- T25 = _mm_sad_pu8(T05, T15);
- T26 = _mm_sad_pu8(T06, T16);
- T27 = _mm_sad_pu8(T07, T17);
-
- sum2 = _mm_add_pi16(sum2, T20);
- sum2 = _mm_add_pi16(sum2, T21);
- sum2 = _mm_add_pi16(sum2, T22);
- sum2 = _mm_add_pi16(sum2, T23);
- sum2 = _mm_add_pi16(sum2, T24);
- sum2 = _mm_add_pi16(sum2, T25);
- sum2 = _mm_add_pi16(sum2, T26);
- sum2 = _mm_add_pi16(sum2, T27);
-
- T10 = _mm_cvtsi32_si64(*(int*)(fref3 + 8 * frefstride));
- T11 = _mm_cvtsi32_si64(*(int*)(fref3 + 9 * frefstride));
- T12 = _mm_cvtsi32_si64(*(int*)(fref3 + 10 * frefstride));
- T13 = _mm_cvtsi32_si64(*(int*)(fref3 + 11 * frefstride));
- T14 = _mm_cvtsi32_si64(*(int*)(fref3 + 12 * frefstride));
- T15 = _mm_cvtsi32_si64(*(int*)(fref3 + 13 * frefstride));
- T16 = _mm_cvtsi32_si64(*(int*)(fref3 + 14 * frefstride));
- T17 = _mm_cvtsi32_si64(*(int*)(fref3 + 15 * frefstride));
-
- T20 = _mm_sad_pu8(T0, T10);
- T21 = _mm_sad_pu8(T1, T11);
- T22 = _mm_sad_pu8(T2, T12);
- T23 = _mm_sad_pu8(T3, T13);
- T24 = _mm_sad_pu8(T4, T14);
- T25 = _mm_sad_pu8(T5, T15);
- T26 = _mm_sad_pu8(T6, T16);
- T27 = _mm_sad_pu8(T7, T17);
-
- sum2 = _mm_add_pi16(sum2, T20);
- sum2 = _mm_add_pi16(sum2, T21);
- sum2 = _mm_add_pi16(sum2, T22);
- sum2 = _mm_add_pi16(sum2, T23);
- sum2 = _mm_add_pi16(sum2, T24);
- sum2 = _mm_add_pi16(sum2, T25);
- sum2 = _mm_add_pi16(sum2, T26);
- sum2 = _mm_add_pi16(sum2, T27);
-
- T10 = _mm_cvtsi32_si64(*(int*)(fref4 + 0 * frefstride));
- T11 = _mm_cvtsi32_si64(*(int*)(fref4 + 1 * frefstride));
- T12 = _mm_cvtsi32_si64(*(int*)(fref4 + 2 * frefstride));
- T13 = _mm_cvtsi32_si64(*(int*)(fref4 + 3 * frefstride));
- T14 = _mm_cvtsi32_si64(*(int*)(fref4 + 4 * frefstride));
- T15 = _mm_cvtsi32_si64(*(int*)(fref4 + 5 * frefstride));
- T16 = _mm_cvtsi32_si64(*(int*)(fref4 + 6 * frefstride));
- T17 = _mm_cvtsi32_si64(*(int*)(fref4 + 7 * frefstride));
-
- T20 = _mm_sad_pu8(T00, T10);
- T21 = _mm_sad_pu8(T01, T11);
- T22 = _mm_sad_pu8(T02, T12);
- T23 = _mm_sad_pu8(T03, T13);
- T24 = _mm_sad_pu8(T04, T14);
- T25 = _mm_sad_pu8(T05, T15);
- T26 = _mm_sad_pu8(T06, T16);
- T27 = _mm_sad_pu8(T07, T17);
-
- sum3 = _mm_add_pi16(sum3, T20);
- sum3 = _mm_add_pi16(sum3, T21);
- sum3 = _mm_add_pi16(sum3, T22);
- sum3 = _mm_add_pi16(sum3, T23);
- sum3 = _mm_add_pi16(sum3, T24);
- sum3 = _mm_add_pi16(sum3, T25);
- sum3 = _mm_add_pi16(sum3, T26);
- sum3 = _mm_add_pi16(sum3, T27);
-
- T10 = _mm_cvtsi32_si64(*(int*)(fref4 + 8 * frefstride));
- T11 = _mm_cvtsi32_si64(*(int*)(fref4 + 9 * frefstride));
- T12 = _mm_cvtsi32_si64(*(int*)(fref4 + 10 * frefstride));
- T13 = _mm_cvtsi32_si64(*(int*)(fref4 + 11 * frefstride));
- T14 = _mm_cvtsi32_si64(*(int*)(fref4 + 12 * frefstride));
- T15 = _mm_cvtsi32_si64(*(int*)(fref4 + 13 * frefstride));
- T16 = _mm_cvtsi32_si64(*(int*)(fref4 + 14 * frefstride));
- T17 = _mm_cvtsi32_si64(*(int*)(fref4 + 15 * frefstride));
-
- T20 = _mm_sad_pu8(T0, T10);
- T21 = _mm_sad_pu8(T1, T11);
- T22 = _mm_sad_pu8(T2, T12);
- T23 = _mm_sad_pu8(T3, T13);
- T24 = _mm_sad_pu8(T4, T14);
- T25 = _mm_sad_pu8(T5, T15);
- T26 = _mm_sad_pu8(T6, T16);
- T27 = _mm_sad_pu8(T7, T17);
-
- sum3 = _mm_add_pi16(sum3, T20);
- sum3 = _mm_add_pi16(sum3, T21);
- sum3 = _mm_add_pi16(sum3, T22);
- sum3 = _mm_add_pi16(sum3, T23);
- sum3 = _mm_add_pi16(sum3, T24);
- sum3 = _mm_add_pi16(sum3, T25);
- sum3 = _mm_add_pi16(sum3, T26);
- sum3 = _mm_add_pi16(sum3, T27);
+ __m128i sum0, sum1, sum2, sum3;
+
+ __m128i T00, T01, T02, T03;
+ __m128i T10, T11, T12, T13;
+ __m128i R00, R01, R02, R03, R04;
+ __m128i T20;
+
+ T00 = _mm_loadl_epi64((__m128i*)(fenc));
+ T01 = _mm_loadl_epi64((__m128i*)(fenc + FENC_STRIDE));
+ T01 = _mm_unpacklo_epi32(T00, T01);
+ T02 = _mm_loadl_epi64((__m128i*)(fenc + (2) * FENC_STRIDE));
+ T03 = _mm_loadl_epi64((__m128i*)(fenc + (3) * FENC_STRIDE));
+ T03 = _mm_unpacklo_epi32(T02, T03);
+ R00 = _mm_unpacklo_epi64(T01, T03);
+
+ T10 = _mm_loadl_epi64((__m128i*)(fref1));
+ T11 = _mm_loadl_epi64((__m128i*)(fref1 + frefstride));
+ T11 = _mm_unpacklo_epi32(T10, T11);
+ T12 = _mm_loadl_epi64((__m128i*)(fref1 + (2) * frefstride));
+ T13 = _mm_loadl_epi64((__m128i*)(fref1 + (3) * frefstride));
+ T13 = _mm_unpacklo_epi32(T12, T13);
+ R01 = _mm_unpacklo_epi64(T11, T13);
+
+ T10 = _mm_loadl_epi64((__m128i*)(fref2));
+ T11 = _mm_loadl_epi64((__m128i*)(fref2 + frefstride));
+ T11 = _mm_unpacklo_epi32(T10, T11);
+ T12 = _mm_loadl_epi64((__m128i*)(fref2 + (2) * frefstride));
+ T13 = _mm_loadl_epi64((__m128i*)(fref2 + (3) * frefstride));
+ T13 = _mm_unpacklo_epi32(T12, T13);
+ R02 = _mm_unpacklo_epi64(T11, T13);
+
+ T10 = _mm_loadl_epi64((__m128i*)(fref3));
+ T11 = _mm_loadl_epi64((__m128i*)(fref3 + frefstride));
+ T11 = _mm_unpacklo_epi32(T10, T11);
+ T12 = _mm_loadl_epi64((__m128i*)(fref3 + (2) * frefstride));
+ T13 = _mm_loadl_epi64((__m128i*)(fref3 + (3) * frefstride));
+ T13 = _mm_unpacklo_epi32(T12, T13);
+ R03 = _mm_unpacklo_epi64(T11, T13);
+
+ T10 = _mm_loadl_epi64((__m128i*)(fref4));
+ T11 = _mm_loadl_epi64((__m128i*)(fref4 + frefstride));
+ T11 = _mm_unpacklo_epi32(T10, T11);
+ T12 = _mm_loadl_epi64((__m128i*)(fref4 + (2) * frefstride));
+ T13 = _mm_loadl_epi64((__m128i*)(fref4 + (3) * frefstride));
+ T13 = _mm_unpacklo_epi32(T12, T13);
+ R04 = _mm_unpacklo_epi64(T11, T13);
+
+ T20 = _mm_sad_epu8(R00, R01);
+ sum0 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+
+ T20 = _mm_sad_epu8(R00, R02);
+ sum1 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+
+ T20 = _mm_sad_epu8(R00, R03);
+ sum2 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+
+ T20 = _mm_sad_epu8(R00, R04);
+ sum3 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+
+ T00 = _mm_loadl_epi64((__m128i*)(fenc + (4) * FENC_STRIDE));
+ T01 = _mm_loadl_epi64((__m128i*)(fenc + (5) * FENC_STRIDE));
+ T01 = _mm_unpacklo_epi32(T00, T01);
+ T02 = _mm_loadl_epi64((__m128i*)(fenc + (6) * FENC_STRIDE));
+ T03 = _mm_loadl_epi64((__m128i*)(fenc + (7) * FENC_STRIDE));
+ T03 = _mm_unpacklo_epi32(T02, T03);
+ R00 = _mm_unpacklo_epi64(T01, T03);
+
+ T10 = _mm_loadl_epi64((__m128i*)(fref1 + (4) * frefstride));
+ T11 = _mm_loadl_epi64((__m128i*)(fref1 + (5) * frefstride));
+ T11 = _mm_unpacklo_epi32(T10, T11);
+ T12 = _mm_loadl_epi64((__m128i*)(fref1 + (6) * frefstride));
+ T13 = _mm_loadl_epi64((__m128i*)(fref1 + (7) * frefstride));
+ T13 = _mm_unpacklo_epi32(T12, T13);
+ R01 = _mm_unpacklo_epi64(T11, T13);
+
+ T10 = _mm_loadl_epi64((__m128i*)(fref2 + (4) * frefstride));
+ T11 = _mm_loadl_epi64((__m128i*)(fref2 + (5) * frefstride));
+ T11 = _mm_unpacklo_epi32(T10, T11);
+ T12 = _mm_loadl_epi64((__m128i*)(fref2 + (6) * frefstride));
+ T13 = _mm_loadl_epi64((__m128i*)(fref2 + (7) * frefstride));
+ T13 = _mm_unpacklo_epi32(T12, T13);
+ R02 = _mm_unpacklo_epi64(T11, T13);
+
+ T10 = _mm_loadl_epi64((__m128i*)(fref3 + (4) * frefstride));
+ T11 = _mm_loadl_epi64((__m128i*)(fref3 + (5) * frefstride));
+ T11 = _mm_unpacklo_epi32(T10, T11);
+ T12 = _mm_loadl_epi64((__m128i*)(fref3 + (6) * frefstride));
+ T13 = _mm_loadl_epi64((__m128i*)(fref3 + (7) * frefstride));
+ T13 = _mm_unpacklo_epi32(T12, T13);
+ R03 = _mm_unpacklo_epi64(T11, T13);
+
+ T10 = _mm_loadl_epi64((__m128i*)(fref4 + (4) * frefstride));
+ T11 = _mm_loadl_epi64((__m128i*)(fref4 + (5) * frefstride));
+ T11 = _mm_unpacklo_epi32(T10, T11);
+ T12 = _mm_loadl_epi64((__m128i*)(fref4 + (6) * frefstride));
+ T13 = _mm_loadl_epi64((__m128i*)(fref4 + (7) * frefstride));
+ T13 = _mm_unpacklo_epi32(T12, T13);
+ R04 = _mm_unpacklo_epi64(T11, T13);
+
+ T20 = _mm_sad_epu8(R00, R01);
+ T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+ sum0 = _mm_add_epi32(sum0, T20);
+
+ T20 = _mm_sad_epu8(R00, R02);
+ T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+ sum1 = _mm_add_epi32(sum1, T20);
+
+ T20 = _mm_sad_epu8(R00, R03);
+ T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+ sum2 = _mm_add_epi32(sum2, T20);
+
+ T20 = _mm_sad_epu8(R00, R04);
+ T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+ sum3 = _mm_add_epi32(sum3, T20);
+
+ T00 = _mm_loadl_epi64((__m128i*)(fenc + (8) * FENC_STRIDE));
+ T01 = _mm_loadl_epi64((__m128i*)(fenc + (9) * FENC_STRIDE));
+ T01 = _mm_unpacklo_epi32(T00, T01);
+ T02 = _mm_loadl_epi64((__m128i*)(fenc + (10) * FENC_STRIDE));
+ T03 = _mm_loadl_epi64((__m128i*)(fenc + (11) * FENC_STRIDE));
+ T03 = _mm_unpacklo_epi32(T02, T03);
+ R00 = _mm_unpacklo_epi64(T01, T03);
+
+ T10 = _mm_loadl_epi64((__m128i*)(fref1 + (8) * frefstride));
+ T11 = _mm_loadl_epi64((__m128i*)(fref1 + (9) * frefstride));
+ T11 = _mm_unpacklo_epi32(T10, T11);
+ T12 = _mm_loadl_epi64((__m128i*)(fref1 + (10) * frefstride));
+ T13 = _mm_loadl_epi64((__m128i*)(fref1 + (11) * frefstride));
+ T13 = _mm_unpacklo_epi32(T12, T13);
+ R01 = _mm_unpacklo_epi64(T11, T13);
+
+ T10 = _mm_loadl_epi64((__m128i*)(fref2 + (8) * frefstride));
+ T11 = _mm_loadl_epi64((__m128i*)(fref2 + (9) * frefstride));
+ T11 = _mm_unpacklo_epi32(T10, T11);
+ T12 = _mm_loadl_epi64((__m128i*)(fref2 + (10) * frefstride));
+ T13 = _mm_loadl_epi64((__m128i*)(fref2 + (11) * frefstride));
+ T13 = _mm_unpacklo_epi32(T12, T13);
+ R02 = _mm_unpacklo_epi64(T11, T13);
+
+ T10 = _mm_loadl_epi64((__m128i*)(fref3 + (8) * frefstride));
+ T11 = _mm_loadl_epi64((__m128i*)(fref3 + (9) * frefstride));
+ T11 = _mm_unpacklo_epi32(T10, T11);
+ T12 = _mm_loadl_epi64((__m128i*)(fref3 + (10) * frefstride));
+ T13 = _mm_loadl_epi64((__m128i*)(fref3 + (11) * frefstride));
+ T13 = _mm_unpacklo_epi32(T12, T13);
+ R03 = _mm_unpacklo_epi64(T11, T13);
+
+ T10 = _mm_loadl_epi64((__m128i*)(fref4 + (8) * frefstride));
+ T11 = _mm_loadl_epi64((__m128i*)(fref4 + (9) * frefstride));
+ T11 = _mm_unpacklo_epi32(T10, T11);
+ T12 = _mm_loadl_epi64((__m128i*)(fref4 + (10) * frefstride));
+ T13 = _mm_loadl_epi64((__m128i*)(fref4 + (11) * frefstride));
+ T13 = _mm_unpacklo_epi32(T12, T13);
+ R04 = _mm_unpacklo_epi64(T11, T13);
+
+ T20 = _mm_sad_epu8(R00, R01);
+ T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+ sum0 = _mm_add_epi32(sum0, T20);
+
+ T20 = _mm_sad_epu8(R00, R02);
+ T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+ sum1 = _mm_add_epi32(sum1, T20);
+
+ T20 = _mm_sad_epu8(R00, R03);
+ T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+ sum2 = _mm_add_epi32(sum2, T20);
+
+ T20 = _mm_sad_epu8(R00, R04);
+ T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+ sum3 = _mm_add_epi32(sum3, T20);
+
+ T00 = _mm_loadl_epi64((__m128i*)(fenc + (12) * FENC_STRIDE));
+ T01 = _mm_loadl_epi64((__m128i*)(fenc + (13) * FENC_STRIDE));
+ T01 = _mm_unpacklo_epi32(T00, T01);
+ T02 = _mm_loadl_epi64((__m128i*)(fenc + (14) * FENC_STRIDE));
+ T03 = _mm_loadl_epi64((__m128i*)(fenc + (15) * FENC_STRIDE));
+ T03 = _mm_unpacklo_epi32(T02, T03);
+ R00 = _mm_unpacklo_epi64(T01, T03);
+
+ T10 = _mm_loadl_epi64((__m128i*)(fref1 + (12) * frefstride));
+ T11 = _mm_loadl_epi64((__m128i*)(fref1 + (13) * frefstride));
+ T11 = _mm_unpacklo_epi32(T10, T11);
+ T12 = _mm_loadl_epi64((__m128i*)(fref1 + (14) * frefstride));
+ T13 = _mm_loadl_epi64((__m128i*)(fref1 + (15) * frefstride));
+ T13 = _mm_unpacklo_epi32(T12, T13);
+ R01 = _mm_unpacklo_epi64(T11, T13);
+
+ T10 = _mm_loadl_epi64((__m128i*)(fref2 + (12) * frefstride));
+ T11 = _mm_loadl_epi64((__m128i*)(fref2 + (13) * frefstride));
+ T11 = _mm_unpacklo_epi32(T10, T11);
+ T12 = _mm_loadl_epi64((__m128i*)(fref2 + (14) * frefstride));
+ T13 = _mm_loadl_epi64((__m128i*)(fref2 + (15) * frefstride));
+ T13 = _mm_unpacklo_epi32(T12, T13);
+ R02 = _mm_unpacklo_epi64(T11, T13);
+
+ T10 = _mm_loadl_epi64((__m128i*)(fref3 + (12) * frefstride));
+ T11 = _mm_loadl_epi64((__m128i*)(fref3 + (13) * frefstride));
+ T11 = _mm_unpacklo_epi32(T10, T11);
+ T12 = _mm_loadl_epi64((__m128i*)(fref3 + (14) * frefstride));
+ T13 = _mm_loadl_epi64((__m128i*)(fref3 + (15) * frefstride));
+ T13 = _mm_unpacklo_epi32(T12, T13);
+ R03 = _mm_unpacklo_epi64(T11, T13);
+
+ T10 = _mm_loadl_epi64((__m128i*)(fref4 + (12) * frefstride));
+ T11 = _mm_loadl_epi64((__m128i*)(fref4 + (13) * frefstride));
+ T11 = _mm_unpacklo_epi32(T10, T11);
+ T12 = _mm_loadl_epi64((__m128i*)(fref4 + (14) * frefstride));
+ T13 = _mm_loadl_epi64((__m128i*)(fref4 + (15) * frefstride));
+ T13 = _mm_unpacklo_epi32(T12, T13);
+ R04 = _mm_unpacklo_epi64(T11, T13);
+
+ T20 = _mm_sad_epu8(R00, R01);
+ T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+ sum0 = _mm_add_epi32(sum0, T20);
+
+ T20 = _mm_sad_epu8(R00, R02);
+ T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+ sum1 = _mm_add_epi32(sum1, T20);
+
+ T20 = _mm_sad_epu8(R00, R03);
+ T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+ sum2 = _mm_add_epi32(sum2, T20);
+
+ T20 = _mm_sad_epu8(R00, R04);
+ T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+ sum3 = _mm_add_epi32(sum3, T20);
+
+ res[0] = _mm_cvtsi128_si32(sum0);
+ res[1] = _mm_cvtsi128_si32(sum1);
+ res[2] = _mm_cvtsi128_si32(sum2);
+ res[3] = _mm_cvtsi128_si32(sum3);
}
else if ((ly % 8) == 0)
{
+ __m128i sum0 = _mm_setzero_si128();
+ __m128i sum1 = _mm_setzero_si128();
+ __m128i sum2 = _mm_setzero_si128();
+ __m128i sum3 = _mm_setzero_si128();
+
+ __m128i T00, T01, T02, T03;
+ __m128i T10, T11, T12, T13;
+ __m128i R00, R01, R02, R03, R04;
+ __m128i T20;
+
for (int i = 0; i < ly; i += 8)
{
+ T00 = _mm_loadl_epi64((__m128i*)(fenc + (i + 0) * FENC_STRIDE));
+ T01 = _mm_loadl_epi64((__m128i*)(fenc + (i + 1) * FENC_STRIDE));
+ T01 = _mm_unpacklo_epi32(T00, T01);
+ T02 = _mm_loadl_epi64((__m128i*)(fenc + (i + 2) * FENC_STRIDE));
+ T03 = _mm_loadl_epi64((__m128i*)(fenc + (i + 3) * FENC_STRIDE));
+ T03 = _mm_unpacklo_epi32(T02, T03);
+ R00 = _mm_unpacklo_epi64(T01, T03);
+
+ T10 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 0) * frefstride));
+ T11 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 1) * frefstride));
+ T11 = _mm_unpacklo_epi32(T10, T11);
+ T12 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 2) * frefstride));
+ T13 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 3) * frefstride));
+ T13 = _mm_unpacklo_epi32(T12, T13);
+ R01 = _mm_unpacklo_epi64(T11, T13);
+
+ T10 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 0) * frefstride));
+ T11 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 1) * frefstride));
+ T11 = _mm_unpacklo_epi32(T10, T11);
+ T12 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 2) * frefstride));
+ T13 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 3) * frefstride));
+ T13 = _mm_unpacklo_epi32(T12, T13);
+ R02 = _mm_unpacklo_epi64(T11, T13);
+
+ T10 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 0) * frefstride));
+ T11 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 1) * frefstride));
+ T11 = _mm_unpacklo_epi32(T10, T11);
+ T12 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 2) * frefstride));
+ T13 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 3) * frefstride));
+ T13 = _mm_unpacklo_epi32(T12, T13);
+ R03 = _mm_unpacklo_epi64(T11, T13);
+
+ T10 = _mm_loadl_epi64((__m128i*)(fref4 + (i + 0) * frefstride));
+ T11 = _mm_loadl_epi64((__m128i*)(fref4 + (i + 1) * frefstride));
+ T11 = _mm_unpacklo_epi32(T10, T11);
+ T12 = _mm_loadl_epi64((__m128i*)(fref4 + (i + 2) * frefstride));
+ T13 = _mm_loadl_epi64((__m128i*)(fref4 + (i + 3) * frefstride));
+ T13 = _mm_unpacklo_epi32(T12, T13);
+ R04 = _mm_unpacklo_epi64(T11, T13);
+
+ T20 = _mm_sad_epu8(R00, R01);
+ T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+ sum0 = _mm_add_epi32(sum0, T20);
+
+ T20 = _mm_sad_epu8(R00, R02);
+ T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+ sum1 = _mm_add_epi32(sum1, T20);
+
+ T20 = _mm_sad_epu8(R00, R03);
+ T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+ sum2 = _mm_add_epi32(sum2, T20);
+
+ T20 = _mm_sad_epu8(R00, R04);
+ T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+ sum3 = _mm_add_epi32(sum3, T20);
+
+ T00 = _mm_loadl_epi64((__m128i*)(fenc + (i + 4) * FENC_STRIDE));
+ T01 = _mm_loadl_epi64((__m128i*)(fenc + (i + 5) * FENC_STRIDE));
+ T01 = _mm_unpacklo_epi32(T00, T01);
+ T02 = _mm_loadl_epi64((__m128i*)(fenc + (i + 6) * FENC_STRIDE));
+ T03 = _mm_loadl_epi64((__m128i*)(fenc + (i + 7) * FENC_STRIDE));
+ T03 = _mm_unpacklo_epi32(T02, T03);
+ R00 = _mm_unpacklo_epi64(T01, T03);
+
+ T10 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 4) * frefstride));
+ T11 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 5) * frefstride));
+ T11 = _mm_unpacklo_epi32(T10, T11);
+ T12 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 6) * frefstride));
+ T13 = _mm_loadl_epi64((__m128i*)(fref1 + (i + 7) * frefstride));
+ T13 = _mm_unpacklo_epi32(T12, T13);
+ R01 = _mm_unpacklo_epi64(T11, T13);
+
+ T10 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 4) * frefstride));
+ T11 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 5) * frefstride));
+ T11 = _mm_unpacklo_epi32(T10, T11);
+ T12 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 6) * frefstride));
+ T13 = _mm_loadl_epi64((__m128i*)(fref2 + (i + 7) * frefstride));
+ T13 = _mm_unpacklo_epi32(T12, T13);
+ R02 = _mm_unpacklo_epi64(T11, T13);
+
+ T10 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 4) * frefstride));
+ T11 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 5) * frefstride));
+ T11 = _mm_unpacklo_epi32(T10, T11);
+ T12 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 6) * frefstride));
+ T13 = _mm_loadl_epi64((__m128i*)(fref3 + (i + 7) * frefstride));
+ T13 = _mm_unpacklo_epi32(T12, T13);
+ R03 = _mm_unpacklo_epi64(T11, T13);
+
+ T10 = _mm_loadl_epi64((__m128i*)(fref4 + (i + 4) * frefstride));
+ T11 = _mm_loadl_epi64((__m128i*)(fref4 + (i + 5) * frefstride));
+ T11 = _mm_unpacklo_epi32(T10, T11);
+ T12 = _mm_loadl_epi64((__m128i*)(fref4 + (i + 6) * frefstride));
+ T13 = _mm_loadl_epi64((__m128i*)(fref4 + (i + 7) * frefstride));
+ T13 = _mm_unpacklo_epi32(T12, T13);
+ R04 = _mm_unpacklo_epi64(T11, T13);
+
+ T20 = _mm_sad_epu8(R00, R01);
+ T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+ sum0 = _mm_add_epi32(sum0, T20);
+
+ T20 = _mm_sad_epu8(R00, R02);
+ T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+ sum1 = _mm_add_epi32(sum1, T20);
+
+ T20 = _mm_sad_epu8(R00, R03);
+ T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+ sum2 = _mm_add_epi32(sum2, T20);
+
+ T20 = _mm_sad_epu8(R00, R04);
+ T20 = _mm_add_epi32(T20, _mm_shuffle_epi32(T20, 2));
+ sum3 = _mm_add_epi32(sum3, T20);
+ }
+
+ res[0] = _mm_cvtsi128_si32(sum0);
+ res[1] = _mm_cvtsi128_si32(sum1);
+ res[2] = _mm_cvtsi128_si32(sum2);
+ res[3] = _mm_cvtsi128_si32(sum3);
+ }
+ else
+ {
+ __m64 sum0 = _mm_setzero_si64();
+ __m64 sum1 = _mm_setzero_si64();
+ __m64 sum2 = _mm_setzero_si64();
+ __m64 sum3 = _mm_setzero_si64();
+
+ __m64 T10, T11, T12, T13;
+ __m64 T00, T01, T02, T03;
+ __m64 T20, T21;
+
+ for (int i = 0; i < ly; i += 4)
+ {
+ int frefstrideZero = (i + 0) * frefstride;
+ int frefstrideOne = (i + 1) * frefstride;
+ int frefstrideTwo = (i + 2) * frefstride;
+ int frefstrideThree = (i + 3) * frefstride;
+
T00 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 0) * FENC_STRIDE));
T01 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 1) * FENC_STRIDE));
+ T00 = _mm_unpacklo_pi8(T00, T01);
T02 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 2) * FENC_STRIDE));
T03 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 3) * FENC_STRIDE));
- T04 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 4) * FENC_STRIDE));
- T05 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 5) * FENC_STRIDE));
- T06 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 6) * FENC_STRIDE));
- T07 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 7) * FENC_STRIDE));
-
- T10 = _mm_cvtsi32_si64(*(int*)(fref1 + (i + 0) * frefstride));
- T11 = _mm_cvtsi32_si64(*(int*)(fref1 + (i + 1) * frefstride));
- T12 = _mm_cvtsi32_si64(*(int*)(fref1 + (i + 2) * frefstride));
- T13 = _mm_cvtsi32_si64(*(int*)(fref1 + (i + 3) * frefstride));
- T14 = _mm_cvtsi32_si64(*(int*)(fref1 + (i + 4) * frefstride));
- T15 = _mm_cvtsi32_si64(*(int*)(fref1 + (i + 5) * frefstride));
- T16 = _mm_cvtsi32_si64(*(int*)(fref1 + (i + 6) * frefstride));
- T17 = _mm_cvtsi32_si64(*(int*)(fref1 + (i + 7) * frefstride));
+ T02 = _mm_unpacklo_pi8(T02, T03);
+
+ T10 = _mm_cvtsi32_si64(*(int*)(fref1 + frefstrideZero));
+ T11 = _mm_cvtsi32_si64(*(int*)(fref1 + frefstrideOne));
+ T10 = _mm_unpacklo_pi8(T10, T11);
+ T12 = _mm_cvtsi32_si64(*(int*)(fref1 + frefstrideTwo));
+ T13 = _mm_cvtsi32_si64(*(int*)(fref1 + frefstrideThree));
+ T12 = _mm_unpacklo_pi8(T12, T13);
T20 = _mm_sad_pu8(T00, T10);
- T21 = _mm_sad_pu8(T01, T11);
- T22 = _mm_sad_pu8(T02, T12);
- T23 = _mm_sad_pu8(T03, T13);
- T24 = _mm_sad_pu8(T04, T14);
- T25 = _mm_sad_pu8(T05, T15);
- T26 = _mm_sad_pu8(T06, T16);
- T27 = _mm_sad_pu8(T07, T17);
+ T21 = _mm_sad_pu8(T02, T12);
sum0 = _mm_add_pi16(sum0, T20);
sum0 = _mm_add_pi16(sum0, T21);
- sum0 = _mm_add_pi16(sum0, T22);
- sum0 = _mm_add_pi16(sum0, T23);
- sum0 = _mm_add_pi16(sum0, T24);
- sum0 = _mm_add_pi16(sum0, T25);
- sum0 = _mm_add_pi16(sum0, T26);
- sum0 = _mm_add_pi16(sum0, T27);
-
- T10 = _mm_cvtsi32_si64(*(int*)(fref2 + (i + 0) * frefstride));
- T11 = _mm_cvtsi32_si64(*(int*)(fref2 + (i + 1) * frefstride));
- T12 = _mm_cvtsi32_si64(*(int*)(fref2 + (i + 2) * frefstride));
- T13 = _mm_cvtsi32_si64(*(int*)(fref2 + (i + 3) * frefstride));
- T14 = _mm_cvtsi32_si64(*(int*)(fref2 + (i + 4) * frefstride));
- T15 = _mm_cvtsi32_si64(*(int*)(fref2 + (i + 5) * frefstride));
- T16 = _mm_cvtsi32_si64(*(int*)(fref2 + (i + 6) * frefstride));
- T17 = _mm_cvtsi32_si64(*(int*)(fref2 + (i + 7) * frefstride));
+
+ T10 = _mm_cvtsi32_si64(*(int*)(fref2 + frefstrideZero));
+ T11 = _mm_cvtsi32_si64(*(int*)(fref2 + frefstrideOne));
+ T10 = _mm_unpacklo_pi8(T10, T11);
+ T12 = _mm_cvtsi32_si64(*(int*)(fref2 + frefstrideTwo));
+ T13 = _mm_cvtsi32_si64(*(int*)(fref2 + frefstrideThree));
+ T12 = _mm_unpacklo_pi8(T12, T13);
T20 = _mm_sad_pu8(T00, T10);
- T21 = _mm_sad_pu8(T01, T11);
- T22 = _mm_sad_pu8(T02, T12);
- T23 = _mm_sad_pu8(T03, T13);
- T24 = _mm_sad_pu8(T04, T14);
- T25 = _mm_sad_pu8(T05, T15);
- T26 = _mm_sad_pu8(T06, T16);
- T27 = _mm_sad_pu8(T07, T17);
+ T21 = _mm_sad_pu8(T02, T12);
sum1 = _mm_add_pi16(sum1, T20);
sum1 = _mm_add_pi16(sum1, T21);
- sum1 = _mm_add_pi16(sum1, T22);
- sum1 = _mm_add_pi16(sum1, T23);
- sum1 = _mm_add_pi16(sum1, T24);
- sum1 = _mm_add_pi16(sum1, T25);
- sum1 = _mm_add_pi16(sum1, T26);
- sum1 = _mm_add_pi16(sum1, T27);
-
- T10 = _mm_cvtsi32_si64(*(int*)(fref3 + (i + 0) * frefstride));
- T11 = _mm_cvtsi32_si64(*(int*)(fref3 + (i + 1) * frefstride));
- T12 = _mm_cvtsi32_si64(*(int*)(fref3 + (i + 2) * frefstride));
- T13 = _mm_cvtsi32_si64(*(int*)(fref3 + (i + 3) * frefstride));
- T14 = _mm_cvtsi32_si64(*(int*)(fref3 + (i + 4) * frefstride));
- T15 = _mm_cvtsi32_si64(*(int*)(fref3 + (i + 5) * frefstride));
- T16 = _mm_cvtsi32_si64(*(int*)(fref3 + (i + 6) * frefstride));
- T17 = _mm_cvtsi32_si64(*(int*)(fref3 + (i + 7) * frefstride));
+
+ T10 = _mm_cvtsi32_si64(*(int*)(fref3 + frefstrideZero));
+ T11 = _mm_cvtsi32_si64(*(int*)(fref3 + frefstrideOne));
+ T10 = _mm_unpacklo_pi8(T10, T11);
+ T12 = _mm_cvtsi32_si64(*(int*)(fref3 + frefstrideTwo));
+ T13 = _mm_cvtsi32_si64(*(int*)(fref3 + frefstrideThree));
+ T12 = _mm_unpacklo_pi8(T12, T13);
T20 = _mm_sad_pu8(T00, T10);
- T21 = _mm_sad_pu8(T01, T11);
- T22 = _mm_sad_pu8(T02, T12);
- T23 = _mm_sad_pu8(T03, T13);
- T24 = _mm_sad_pu8(T04, T14);
- T25 = _mm_sad_pu8(T05, T15);
- T26 = _mm_sad_pu8(T06, T16);
- T27 = _mm_sad_pu8(T07, T17);
+ T21 = _mm_sad_pu8(T02, T12);
sum2 = _mm_add_pi16(sum2, T20);
sum2 = _mm_add_pi16(sum2, T21);
- sum2 = _mm_add_pi16(sum2, T22);
- sum2 = _mm_add_pi16(sum2, T23);
- sum2 = _mm_add_pi16(sum2, T24);
- sum2 = _mm_add_pi16(sum2, T25);
- sum2 = _mm_add_pi16(sum2, T26);
- sum2 = _mm_add_pi16(sum2, T27);
-
- T10 = _mm_cvtsi32_si64(*(int*)(fref4 + (i + 0) * frefstride));
- T11 = _mm_cvtsi32_si64(*(int*)(fref4 + (i + 1) * frefstride));
- T12 = _mm_cvtsi32_si64(*(int*)(fref4 + (i + 2) * frefstride));
- T13 = _mm_cvtsi32_si64(*(int*)(fref4 + (i + 3) * frefstride));
- T14 = _mm_cvtsi32_si64(*(int*)(fref4 + (i + 4) * frefstride));
- T15 = _mm_cvtsi32_si64(*(int*)(fref4 + (i + 5) * frefstride));
- T16 = _mm_cvtsi32_si64(*(int*)(fref4 + (i + 6) * frefstride));
- T17 = _mm_cvtsi32_si64(*(int*)(fref4 + (i + 7) * frefstride));
+
+ T10 = _mm_cvtsi32_si64(*(int*)(fref4 + frefstrideZero));
+ T11 = _mm_cvtsi32_si64(*(int*)(fref4 + frefstrideOne));
+ T10 = _mm_unpacklo_pi8(T10, T11);
+ T12 = _mm_cvtsi32_si64(*(int*)(fref4 + frefstrideTwo));
+ T13 = _mm_cvtsi32_si64(*(int*)(fref4 + frefstrideThree));
+ T12 = _mm_unpacklo_pi8(T12, T13);
T20 = _mm_sad_pu8(T00, T10);
- T21 = _mm_sad_pu8(T01, T11);
- T22 = _mm_sad_pu8(T02, T12);
- T23 = _mm_sad_pu8(T03, T13);
- T24 = _mm_sad_pu8(T04, T14);
- T25 = _mm_sad_pu8(T05, T15);
- T26 = _mm_sad_pu8(T06, T16);
- T27 = _mm_sad_pu8(T07, T17);
+ T21 = _mm_sad_pu8(T02, T12);
sum3 = _mm_add_pi16(sum3, T20);
sum3 = _mm_add_pi16(sum3, T21);
- sum3 = _mm_add_pi16(sum3, T22);
- sum3 = _mm_add_pi16(sum3, T23);
- sum3 = _mm_add_pi16(sum3, T24);
- sum3 = _mm_add_pi16(sum3, T25);
- sum3 = _mm_add_pi16(sum3, T26);
- sum3 = _mm_add_pi16(sum3, T27);
}
- }
- else
- {
- for (int i = 0; i < ly; i += 4)
- {
- T00 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 0) * FENC_STRIDE));
- T01 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 1) * FENC_STRIDE));
- T02 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 2) * FENC_STRIDE));
- T03 = _mm_cvtsi32_si64(*(int*)(fenc + (i + 3) * FENC_STRIDE));
-
- T10 = _mm_cvtsi32_si64(*(int*)(fref1 + (i + 0) * frefstride));
- T11 = _mm_cvtsi32_si64(*(int*)(fref1 + (i + 1) * frefstride));
- T12 = _mm_cvtsi32_si64(*(int*)(fref1 + (i + 2) * frefstride));
- T13 = _mm_cvtsi32_si64(*(int*)(fref1 + (i + 3) * frefstride));
-
- T20 = _mm_sad_pu8(T00, T10);
- T21 = _mm_sad_pu8(T01, T11);
- T22 = _mm_sad_pu8(T02, T12);
- T23 = _mm_sad_pu8(T03, T13);
-
- sum0 = _mm_add_pi16(sum0, T20);
- sum0 = _mm_add_pi16(sum0, T21);
- sum0 = _mm_add_pi16(sum0, T22);
- sum0 = _mm_add_pi16(sum0, T23);
-
- T10 = _mm_cvtsi32_si64(*(int*)(fref2 + (i + 0) * frefstride));
- T11 = _mm_cvtsi32_si64(*(int*)(fref2 + (i + 1) * frefstride));
- T12 = _mm_cvtsi32_si64(*(int*)(fref2 + (i + 2) * frefstride));
- T13 = _mm_cvtsi32_si64(*(int*)(fref2 + (i + 3) * frefstride));
-
- T20 = _mm_sad_pu8(T00, T10);
- T21 = _mm_sad_pu8(T01, T11);
- T22 = _mm_sad_pu8(T02, T12);
- T23 = _mm_sad_pu8(T03, T13);
-
- sum1 = _mm_add_pi16(sum1, T20);
- sum1 = _mm_add_pi16(sum1, T21);
- sum1 = _mm_add_pi16(sum1, T22);
- sum1 = _mm_add_pi16(sum1, T23);
-
- T10 = _mm_cvtsi32_si64(*(int*)(fref3 + (i + 0) * frefstride));
- T11 = _mm_cvtsi32_si64(*(int*)(fref3 + (i + 1) * frefstride));
- T12 = _mm_cvtsi32_si64(*(int*)(fref3 + (i + 2) * frefstride));
- T13 = _mm_cvtsi32_si64(*(int*)(fref3 + (i + 3) * frefstride));
-
- T20 = _mm_sad_pu8(T00, T10);
- T21 = _mm_sad_pu8(T01, T11);
- T22 = _mm_sad_pu8(T02, T12);
- T23 = _mm_sad_pu8(T03, T13);
-
- sum2 = _mm_add_pi16(sum2, T20);
- sum2 = _mm_add_pi16(sum2, T21);
- sum2 = _mm_add_pi16(sum2, T22);
- sum2 = _mm_add_pi16(sum2, T23);
-
- T10 = _mm_cvtsi32_si64(*(int*)(fref4 + (i + 0) * frefstride));
- T11 = _mm_cvtsi32_si64(*(int*)(fref4 + (i + 1) * frefstride));
- T12 = _mm_cvtsi32_si64(*(int*)(fref4 + (i + 2) * frefstride));
- T13 = _mm_cvtsi32_si64(*(int*)(fref4 + (i + 3) * frefstride));
-
- T20 = _mm_sad_pu8(T00, T10);
- T21 = _mm_sad_pu8(T01, T11);
- T22 = _mm_sad_pu8(T02, T12);
- T23 = _mm_sad_pu8(T03, T13);
-
- sum3 = _mm_add_pi16(sum3, T20);
- sum3 = _mm_add_pi16(sum3, T21);
- sum3 = _mm_add_pi16(sum3, T22);
- sum3 = _mm_add_pi16(sum3, T23);
- }
- }
-
- res[0] = _m_to_int(sum0);
- res[1] = _m_to_int(sum1);
- res[2] = _m_to_int(sum2);
- res[3] = _m_to_int(sum3);
+
+ res[0] = _m_to_int(sum0);
+ res[1] = _m_to_int(sum1);
+ res[2] = _m_to_int(sum2);
+ res[3] = _m_to_int(sum3);
+ }
}
#else /* if HAVE_MMX */