[x265] Fwd: [PATCH] Replace sad_12, sad_24, sad_32 vector class functions with intrinsics
Praveen Tiwari
praveen at multicorewareinc.com
Tue Oct 1 17:11:21 CEST 2013
---------- Forwarded message ----------
From: <dnyaneshwar at multicorewareinc.com>
Date: Tue, Oct 1, 2013 at 6:38 PM
Subject: [x265] [PATCH] Replace sad_12, sad_24, sad_32 vector class
functions with intrinsics
To: x265-devel at videolan.org
# HG changeset patch
# User Dnyaneshwar
# Date 1380631342 -19800
# Tue Oct 01 18:12:22 2013 +0530
# Node ID ecc483a16f1d9e0163182d090c18fad3f1616ab5
# Parent a03659cfa9574a2639292e427b2cb3d080c648ad
Replace sad_12, sad_24, sad_32 vector class functions with intrinsics.
Performance improvement measured is close to 1.5x
diff -r a03659cfa957 -r ecc483a16f1d source/common/vec/pixel8.inc
--- a/source/common/vec/pixel8.inc Mon Sep 30 21:26:49 2013 -0500
+++ b/source/common/vec/pixel8.inc Tue Oct 01 18:12:22 2013 +0530
@@ -840,51 +840,324 @@
}
#endif /* if HAVE_MMX */
+
+template<int ly>
+int sad_12(pixel *fenc, intptr_t fencstride, pixel *fref, intptr_t frefstride)
+{
+ assert((ly % 4) == 0);
+ __m128i sum0 = _mm_setzero_si128();
+ __m128i sum1 = _mm_setzero_si128();
>>+ __m128i T00, T01, T02, T03;
>>+ __m128i T10, T11, T12, T13;
>>+ __m128i T20, T21, T22, T23;
>>+
I think we can move the above declarations into the local blocks where they are actually used.
>>+ __m128i mask;
>>+ mask = _mm_set_epi32(0x0, 0xffffffff, 0xffffffff, 0xffffffff);
This syntax seems more obvious to me when you need to declare and initialize at the same time, and it has the same effect:
__m128i mask = _mm_set_epi32(0x0, 0xffffffff, 0xffffffff, 0xffffffff);
+
+ if (ly == 4)
+ {
Here is where you would declare your temporary registers, like this (the same applies to all blocks, including the loops):
__m128i T00, T01, T02, T03;
__m128i T10, T11, T12, T13;
__m128i T20, T21, T22, T23;
+ T00 = _mm_load_si128((__m128i*)(fenc));
+ T00 = _mm_and_si128(T00, mask);
+ T01 = _mm_load_si128((__m128i*)(fenc + fencstride));
+ T01 = _mm_and_si128(T01, mask);
+ T02 = _mm_load_si128((__m128i*)(fenc + (2) * fencstride));
+ T02 = _mm_and_si128(T02, mask);
+ T03 = _mm_load_si128((__m128i*)(fenc + (3) * fencstride));
+ T03 = _mm_and_si128(T03, mask);
+
+ T10 = _mm_load_si128((__m128i*)(fref));
+ T10 = _mm_and_si128(T10, mask);
+ T11 = _mm_load_si128((__m128i*)(fref + frefstride));
+ T11 = _mm_and_si128(T11, mask);
+ T12 = _mm_load_si128((__m128i*)(fref + (2) * frefstride));
+ T12 = _mm_and_si128(T12, mask);
+ T13 = _mm_load_si128((__m128i*)(fref + (3) * frefstride));
+ T13 = _mm_and_si128(T13, mask);
+
+ T20 = _mm_sad_epu8(T00, T10);
+ T21 = _mm_sad_epu8(T01, T11);
+ T22 = _mm_sad_epu8(T02, T12);
+ T23 = _mm_sad_epu8(T03, T13);
+
>>+ sum0 = _mm_add_epi16(sum0, T20);
>>+ sum0 = _mm_add_epi16(sum0, T21);
>>+ sum0 = _mm_add_epi16(sum0, T22);
>>+ sum0 = _mm_add_epi16(sum0, T23);
You can replace this with the following:
T20 = _mm_add_epi16(T20, T21);
T22 = _mm_add_epi16(T22, T23);
sum0 = _mm_add_epi16(T20, T22);
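Putting those two suggestions together, the ly == 4 case would look roughly like this (just a sketch of the shape I mean, not compiled or tested):

    if (ly == 4)
    {
        // temporaries scoped to this block only
        __m128i T00, T01, T02, T03;
        __m128i T10, T11, T12, T13;
        __m128i T20, T21, T22, T23;

        /* ... masked loads of the four fenc and fref rows, exactly as in your patch ... */

        T20 = _mm_sad_epu8(T00, T10);
        T21 = _mm_sad_epu8(T01, T11);
        T22 = _mm_sad_epu8(T02, T12);
        T23 = _mm_sad_epu8(T03, T13);

        // pair the partial sums instead of a serial chain on sum0
        T20 = _mm_add_epi16(T20, T21);
        T22 = _mm_add_epi16(T22, T23);
        sum0 = _mm_add_epi16(T20, T22);
    }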
+ }
+ else if (ly == 8)
+ {
+ T00 = _mm_load_si128((__m128i*)(fenc));
+ T00 = _mm_and_si128(T00, mask);
+ T01 = _mm_load_si128((__m128i*)(fenc + fencstride));
+ T01 = _mm_and_si128(T01, mask);
+ T02 = _mm_load_si128((__m128i*)(fenc + (2) * fencstride));
+ T02 = _mm_and_si128(T02, mask);
+ T03 = _mm_load_si128((__m128i*)(fenc + (3) * fencstride));
+ T03 = _mm_and_si128(T03, mask);
+
+ T10 = _mm_load_si128((__m128i*)(fref));
+ T10 = _mm_and_si128(T10, mask);
+ T11 = _mm_load_si128((__m128i*)(fref + frefstride));
+ T11 = _mm_and_si128(T11, mask);
+ T12 = _mm_load_si128((__m128i*)(fref + (2) * frefstride));
+ T12 = _mm_and_si128(T12, mask);
+ T13 = _mm_load_si128((__m128i*)(fref + (3) * frefstride));
+ T13 = _mm_and_si128(T13, mask);
+
+ T20 = _mm_sad_epu8(T00, T10);
+ T21 = _mm_sad_epu8(T01, T11);
+ T22 = _mm_sad_epu8(T02, T12);
+ T23 = _mm_sad_epu8(T03, T13);
+
>>+ sum0 = _mm_add_epi16(sum0, T20);
>>+ sum0 = _mm_add_epi16(sum0, T21);
>>+ sum0 = _mm_add_epi16(sum0, T22);
>>+ sum0 = _mm_add_epi16(sum0, T23);
This can be replaced as above (apply it wherever it logically fits):
T20 = _mm_add_epi16(T20, T21);
T22 = _mm_add_epi16(T22, T23);
sum0 = _mm_add_epi16(T20, T22);
+ T00 = _mm_load_si128((__m128i*)(fenc + (4) * fencstride));
+ T00 = _mm_and_si128(T00, mask);
+ T01 = _mm_load_si128((__m128i*)(fenc + (5) * fencstride));
+ T01 = _mm_and_si128(T01, mask);
+ T02 = _mm_load_si128((__m128i*)(fenc + (6) * fencstride));
+ T02 = _mm_and_si128(T02, mask);
+ T03 = _mm_load_si128((__m128i*)(fenc + (7) * fencstride));
+ T03 = _mm_and_si128(T03, mask);
+
+ T10 = _mm_load_si128((__m128i*)(fref + (4) * frefstride));
+ T10 = _mm_and_si128(T10, mask);
+ T11 = _mm_load_si128((__m128i*)(fref + (5) * frefstride));
+ T11 = _mm_and_si128(T11, mask);
+ T12 = _mm_load_si128((__m128i*)(fref + (6) * frefstride));
+ T12 = _mm_and_si128(T12, mask);
+ T13 = _mm_load_si128((__m128i*)(fref + (7) * frefstride));
+ T13 = _mm_and_si128(T13, mask);
+
+ T20 = _mm_sad_epu8(T00, T10);
+ T21 = _mm_sad_epu8(T01, T11);
+ T22 = _mm_sad_epu8(T02, T12);
+ T23 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi16(sum0, T20);
+ sum0 = _mm_add_epi16(sum0, T21);
+ sum0 = _mm_add_epi16(sum0, T22);
+ sum0 = _mm_add_epi16(sum0, T23);
+ }
+ else if (ly == 16)
+ {
+ T00 = _mm_load_si128((__m128i*)(fenc));
+ T00 = _mm_and_si128(T00, mask);
+ T01 = _mm_load_si128((__m128i*)(fenc + fencstride));
+ T01 = _mm_and_si128(T01, mask);
+ T02 = _mm_load_si128((__m128i*)(fenc + (2) * fencstride));
+ T02 = _mm_and_si128(T02, mask);
+ T03 = _mm_load_si128((__m128i*)(fenc + (3) * fencstride));
+ T03 = _mm_and_si128(T03, mask);
+
+ T10 = _mm_load_si128((__m128i*)(fref));
+ T10 = _mm_and_si128(T10, mask);
+ T11 = _mm_load_si128((__m128i*)(fref + frefstride));
+ T11 = _mm_and_si128(T11, mask);
+ T12 = _mm_load_si128((__m128i*)(fref + (2) * frefstride));
+ T12 = _mm_and_si128(T12, mask);
+ T13 = _mm_load_si128((__m128i*)(fref + (3) * frefstride));
+ T13 = _mm_and_si128(T13, mask);
+
+ T20 = _mm_sad_epu8(T00, T10);
+ T21 = _mm_sad_epu8(T01, T11);
+ T22 = _mm_sad_epu8(T02, T12);
+ T23 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi16(sum0, T20);
+ sum0 = _mm_add_epi16(sum0, T21);
+ sum0 = _mm_add_epi16(sum0, T22);
+ sum0 = _mm_add_epi16(sum0, T23);
+
+ T00 = _mm_load_si128((__m128i*)(fenc + (4) * fencstride));
+ T00 = _mm_and_si128(T00, mask);
+ T01 = _mm_load_si128((__m128i*)(fenc + (5) * fencstride));
+ T01 = _mm_and_si128(T01, mask);
+ T02 = _mm_load_si128((__m128i*)(fenc + (6) * fencstride));
+ T02 = _mm_and_si128(T02, mask);
+ T03 = _mm_load_si128((__m128i*)(fenc + (7) * fencstride));
+ T03 = _mm_and_si128(T03, mask);
+
+ T10 = _mm_load_si128((__m128i*)(fref + (4) * frefstride));
+ T10 = _mm_and_si128(T10, mask);
+ T11 = _mm_load_si128((__m128i*)(fref + (5) * frefstride));
+ T11 = _mm_and_si128(T11, mask);
+ T12 = _mm_load_si128((__m128i*)(fref + (6) * frefstride));
+ T12 = _mm_and_si128(T12, mask);
+ T13 = _mm_load_si128((__m128i*)(fref + (7) * frefstride));
+ T13 = _mm_and_si128(T13, mask);
+
+ T20 = _mm_sad_epu8(T00, T10);
+ T21 = _mm_sad_epu8(T01, T11);
+ T22 = _mm_sad_epu8(T02, T12);
+ T23 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi16(sum0, T20);
+ sum0 = _mm_add_epi16(sum0, T21);
+ sum0 = _mm_add_epi16(sum0, T22);
+ sum0 = _mm_add_epi16(sum0, T23);
+
+ T00 = _mm_load_si128((__m128i*)(fenc + (8) * fencstride));
+ T00 = _mm_and_si128(T00, mask);
+ T01 = _mm_load_si128((__m128i*)(fenc + (9) * fencstride));
+ T01 = _mm_and_si128(T01, mask);
+ T02 = _mm_load_si128((__m128i*)(fenc + (10) * fencstride));
+ T02 = _mm_and_si128(T02, mask);
+ T03 = _mm_load_si128((__m128i*)(fenc + (11) * fencstride));
+ T03 = _mm_and_si128(T03, mask);
+
+ T10 = _mm_load_si128((__m128i*)(fref + (8) * frefstride));
+ T10 = _mm_and_si128(T10, mask);
+ T11 = _mm_load_si128((__m128i*)(fref + (9) * frefstride));
+ T11 = _mm_and_si128(T11, mask);
+ T12 = _mm_load_si128((__m128i*)(fref + (10) * frefstride));
+ T12 = _mm_and_si128(T12, mask);
+ T13 = _mm_load_si128((__m128i*)(fref + (11) * frefstride));
+ T13 = _mm_and_si128(T13, mask);
+
+ T20 = _mm_sad_epu8(T00, T10);
+ T21 = _mm_sad_epu8(T01, T11);
+ T22 = _mm_sad_epu8(T02, T12);
+ T23 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi16(sum0, T20);
+ sum0 = _mm_add_epi16(sum0, T21);
+ sum0 = _mm_add_epi16(sum0, T22);
+ sum0 = _mm_add_epi16(sum0, T23);
+
+ T00 = _mm_load_si128((__m128i*)(fenc + (12) * fencstride));
+ T00 = _mm_and_si128(T00, mask);
+ T01 = _mm_load_si128((__m128i*)(fenc + (13) * fencstride));
+ T01 = _mm_and_si128(T01, mask);
+ T02 = _mm_load_si128((__m128i*)(fenc + (14) * fencstride));
+ T02 = _mm_and_si128(T02, mask);
+ T03 = _mm_load_si128((__m128i*)(fenc + (15) * fencstride));
+ T03 = _mm_and_si128(T03, mask);
+
+ T10 = _mm_load_si128((__m128i*)(fref + (12) * frefstride));
+ T10 = _mm_and_si128(T10, mask);
+ T11 = _mm_load_si128((__m128i*)(fref + (13) * frefstride));
+ T11 = _mm_and_si128(T11, mask);
+ T12 = _mm_load_si128((__m128i*)(fref + (14) * frefstride));
+ T12 = _mm_and_si128(T12, mask);
+ T13 = _mm_load_si128((__m128i*)(fref + (15) * frefstride));
+ T13 = _mm_and_si128(T13, mask);
+
+ T20 = _mm_sad_epu8(T00, T10);
+ T21 = _mm_sad_epu8(T01, T11);
+ T22 = _mm_sad_epu8(T02, T12);
+ T23 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi16(sum0, T20);
+ sum0 = _mm_add_epi16(sum0, T21);
+ sum0 = _mm_add_epi16(sum0, T22);
+ sum0 = _mm_add_epi16(sum0, T23);
+ }
+ else if ((ly % 8) == 0)
+ {
+ for (int i = 0; i < ly; i += 8)
+ {
+ T00 = _mm_load_si128((__m128i*)(fenc + (i) * fencstride));
+ T00 = _mm_and_si128(T00, mask);
+ T01 = _mm_load_si128((__m128i*)(fenc + (i + 1) * fencstride));
+ T01 = _mm_and_si128(T01, mask);
+ T02 = _mm_load_si128((__m128i*)(fenc + (i + 2) * fencstride));
+ T02 = _mm_and_si128(T02, mask);
+ T03 = _mm_load_si128((__m128i*)(fenc + (i + 3) * fencstride));
+ T03 = _mm_and_si128(T03, mask);
+
+ T10 = _mm_load_si128((__m128i*)(fref + (i) * frefstride));
+ T10 = _mm_and_si128(T10, mask);
+ T11 = _mm_load_si128((__m128i*)(fref + (i + 1) * frefstride));
+ T11 = _mm_and_si128(T11, mask);
+ T12 = _mm_load_si128((__m128i*)(fref + (i + 2) * frefstride));
+ T12 = _mm_and_si128(T12, mask);
+ T13 = _mm_load_si128((__m128i*)(fref + (i + 3) * frefstride));
+ T13 = _mm_and_si128(T13, mask);
+
+ T20 = _mm_sad_epu8(T00, T10);
+ T21 = _mm_sad_epu8(T01, T11);
+ T22 = _mm_sad_epu8(T02, T12);
+ T23 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi16(sum0, T20);
+ sum0 = _mm_add_epi16(sum0, T21);
+ sum0 = _mm_add_epi16(sum0, T22);
+ sum0 = _mm_add_epi16(sum0, T23);
+
+ T00 = _mm_load_si128((__m128i*)(fenc + (i + 4) * fencstride));
+ T00 = _mm_and_si128(T00, mask);
+ T01 = _mm_load_si128((__m128i*)(fenc + (i + 5) * fencstride));
+ T01 = _mm_and_si128(T01, mask);
+ T02 = _mm_load_si128((__m128i*)(fenc + (i + 6) * fencstride));
+ T02 = _mm_and_si128(T02, mask);
+ T03 = _mm_load_si128((__m128i*)(fenc + (i + 7) * fencstride));
+ T03 = _mm_and_si128(T03, mask);
+
+ T10 = _mm_load_si128((__m128i*)(fref + (i + 4) * frefstride));
+ T10 = _mm_and_si128(T10, mask);
+ T11 = _mm_load_si128((__m128i*)(fref + (i + 5) * frefstride));
+ T11 = _mm_and_si128(T11, mask);
+ T12 = _mm_load_si128((__m128i*)(fref + (i + 6) * frefstride));
+ T12 = _mm_and_si128(T12, mask);
+ T13 = _mm_load_si128((__m128i*)(fref + (i + 7) * frefstride));
+ T13 = _mm_and_si128(T13, mask);
+
+ T20 = _mm_sad_epu8(T00, T10);
+ T21 = _mm_sad_epu8(T01, T11);
+ T22 = _mm_sad_epu8(T02, T12);
+ T23 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi16(sum0, T20);
+ sum0 = _mm_add_epi16(sum0, T21);
+ sum0 = _mm_add_epi16(sum0, T22);
+ sum0 = _mm_add_epi16(sum0, T23);
+ }
+ }
+ else
+ {
+ for (int i = 0; i < ly; i += 4)
+ {
+ T00 = _mm_load_si128((__m128i*)(fenc + (i) * fencstride));
+ T00 = _mm_and_si128(T00, mask);
+ T01 = _mm_load_si128((__m128i*)(fenc + (i + 1) * fencstride));
+ T01 = _mm_and_si128(T01, mask);
+ T02 = _mm_load_si128((__m128i*)(fenc + (i + 2) * fencstride));
+ T02 = _mm_and_si128(T02, mask);
+ T03 = _mm_load_si128((__m128i*)(fenc + (i + 3) * fencstride));
+ T03 = _mm_and_si128(T03, mask);
+
+ T10 = _mm_load_si128((__m128i*)(fref + (i) * frefstride));
+ T10 = _mm_and_si128(T10, mask);
+ T11 = _mm_load_si128((__m128i*)(fref + (i + 1) * frefstride));
+ T11 = _mm_and_si128(T11, mask);
+ T12 = _mm_load_si128((__m128i*)(fref + (i + 2) * frefstride));
+ T12 = _mm_and_si128(T12, mask);
+ T13 = _mm_load_si128((__m128i*)(fref + (i + 3) * frefstride));
+ T13 = _mm_and_si128(T13, mask);
+
+ T20 = _mm_sad_epu8(T00, T10);
+ T21 = _mm_sad_epu8(T01, T11);
+ T22 = _mm_sad_epu8(T02, T12);
+ T23 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi16(sum0, T20);
+ sum0 = _mm_add_epi16(sum0, T21);
+ sum0 = _mm_add_epi16(sum0, T22);
+ sum0 = _mm_add_epi16(sum0, T23);
+ }
+ }
+ sum1 = _mm_shuffle_epi32(sum0, 2);
+ sum0 = _mm_add_epi32(sum0, sum1);
+
+ return _mm_cvtsi128_si32(sum0);
+}
+
>> #endif /* SSE41 */
You can guard all of these functions with a single #if INSTRSET >= X265_CPU_LEVEL_SSE41 block and move it to the end. Also, you have forgotten to remove the template metaprogramming code used by the old vector code, i.e. things like this:
template<int size>
ALWAYSINLINE void unrollFunc_64(pixel *fenc, intptr_t fencstride, pixel *fref, intptr_t frefstride, Vec8us& sad)
{
    unrollFunc_64<1>(fenc, fencstride, fref, frefstride, sad);
    unrollFunc_64<size - 1>(fenc + fencstride, fencstride, fref + frefstride, frefstride, sad);
}
Please remove it for all sizes that you have converted to intrinsics.
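To be concrete, I mean one guard around all of the converted functions rather than a separate #if/#endif pair per function, something along these lines (sketch only; the exact placement within pixel8.inc is up to you):

    #if INSTRSET >= X265_CPU_LEVEL_SSE41

    template<int ly>
    int sad_12(pixel *fenc, intptr_t fencstride, pixel *fref, intptr_t frefstride)
    { /* intrinsic version */ }

    template<int ly>
    int sad_24(pixel *fenc, intptr_t fencstride, pixel *fref, intptr_t frefstride)
    { /* intrinsic version */ }

    template<int ly>
    int sad_32(pixel *fenc, intptr_t fencstride, pixel *fref, intptr_t frefstride)
    { /* intrinsic version */ }

    #endif /* INSTRSET >= X265_CPU_LEVEL_SSE41 */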
-template<int ly>
-int sad_12(pixel * fenc, intptr_t fencstride, pixel * fref, intptr_t frefstride)
-{
- Vec16uc m1, n1;
-
- Vec4i sum(0);
- Vec8us sad(0);
- int max_iterators = (ly >> 4) << 4;
- int row;
-
- for (row = 0; row < max_iterators; row += 16)
- {
- for (int i = 0; i < 16; i++)
- {
- m1.load_a(fenc);
- m1.cutoff(12);
- n1.load(fref);
- n1.cutoff(12);
- sad.addSumAbsDiff(m1, n1);
-
- fenc += fencstride;
- fref += frefstride;
- }
-
- sum += extend_low(sad) + extend_high(sad);
- sad = 0;
- }
-
- while (row++ < ly)
- {
- m1.load_a(fenc);
- m1.cutoff(12);
- n1.load(fref);
- n1.cutoff(12);
- sad.addSumAbsDiff(m1, n1);
-
- fenc += fencstride;
- fref += frefstride;
- }
-
- sum += extend_low(sad) + extend_high(sad);
- return horizontal_add(sum);
-}
#if INSTRSET >= X265_CPU_LEVEL_SSE41
template<int ly>
@@ -1123,56 +1396,486 @@
#endif /* if INSTRSET >= X265_CPU_LEVEL_SSE41 */
-template<int ly>
-int sad_24(pixel *fenc, intptr_t fencstride, pixel *fref, intptr_t frefstride)
-{
- Vec16uc m1, n1;
-
- Vec4i sum(0);
- Vec8us sad(0);
- int max_iterators = (ly >> 4) << 4;
- int row;
-
- for (row = 0; row < max_iterators; row += 16)
- {
- for (int i = 0; i < 16; i++)
- {
- m1.load_a(fenc);
- n1.load(fref);
- sad.addSumAbsDiff(m1, n1);
-
- m1.load_a(fenc + 16);
- m1.cutoff(8);
- n1.load(fref + 16);
- n1.cutoff(8);
- sad.addSumAbsDiff(m1, n1);
-
- fenc += fencstride;
- fref += frefstride;
- }
-
- sum += extend_low(sad) + extend_high(sad);
- sad = 0;
- }
-
- while (row++ < ly)
- {
- m1.load_a(fenc);
- n1.load(fref);
- sad.addSumAbsDiff(m1, n1);
-
- m1.load_a(fenc + 16);
- m1.cutoff(8);
- n1.load(fref + 16);
- n1.cutoff(8);
- sad.addSumAbsDiff(m1, n1);
-
- fenc += fencstride;
- fref += frefstride;
- }
-
- sum += extend_low(sad) + extend_high(sad);
- return horizontal_add(sum);
+template<int ly>
+int sad_24(pixel *fenc, intptr_t fencstride, pixel *fref, intptr_t frefstride)
+{
+ assert((ly % 4) == 0);
+ __m128i sum0 = _mm_setzero_si128();
+ __m128i sum1 = _mm_setzero_si128();
+ __m128i T00, T01, T02, T03;
+ __m128i T10, T11, T12, T13;
+ __m128i T20, T21, T22, T23;
+
+ if (ly == 4)
+ {
+ T00 = _mm_load_si128((__m128i*)(fenc));
+ T01 = _mm_load_si128((__m128i*)(fenc + fencstride));
+ T02 = _mm_load_si128((__m128i*)(fenc + (2) * fencstride));
+ T03 = _mm_load_si128((__m128i*)(fenc + (3) * fencstride));
+
+ T10 = _mm_load_si128((__m128i*)(fref));
+ T11 = _mm_load_si128((__m128i*)(fref + frefstride));
+ T12 = _mm_load_si128((__m128i*)(fref + (2) * frefstride));
+ T13 = _mm_load_si128((__m128i*)(fref + (3) * frefstride));
+
+ T20 = _mm_sad_epu8(T00, T10);
+ T21 = _mm_sad_epu8(T01, T11);
+ T22 = _mm_sad_epu8(T02, T12);
+ T23 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi16(sum0, T20);
+ sum0 = _mm_add_epi16(sum0, T21);
+ sum0 = _mm_add_epi16(sum0, T22);
+ sum0 = _mm_add_epi16(sum0, T23);
+
+ T00 = _mm_loadl_epi64((__m128i*)(fenc + 16));
+ T01 = _mm_loadl_epi64((__m128i*)(fenc + (fencstride + 16)));
+ T01 = _mm_unpacklo_epi64(T00, T01);
+
+ T02 = _mm_loadl_epi64((__m128i*)(fenc + ((2) * fencstride) + 16));
+ T03 = _mm_loadl_epi64((__m128i*)(fenc + ((3) * fencstride) + 16));
+ T03 = _mm_unpacklo_epi64(T02, T03);
+
+ T10 = _mm_loadl_epi64((__m128i*)(fref + ((0) * frefstride) + 16));
+ T11 = _mm_loadl_epi64((__m128i*)(fref + ((1) * frefstride) + 16));
+ T11 = _mm_unpacklo_epi64(T10, T11);
+
+ T12 = _mm_loadl_epi64((__m128i*)(fref + ((2) * frefstride) + 16));
+ T13 = _mm_loadl_epi64((__m128i*)(fref + ((3) * frefstride) + 16));
+ T13 = _mm_unpacklo_epi64(T12, T13);
+
+ T20 = _mm_setzero_si128();
+ T21 = _mm_setzero_si128();
+
+ T20 = _mm_sad_epu8(T01, T11);
+ T21 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi32(sum0, T20);
+ sum0 = _mm_add_epi32(sum0, T21);
+ }
+ else if (ly == 8)
+ {
+ T00 = _mm_load_si128((__m128i*)(fenc));
+ T01 = _mm_load_si128((__m128i*)(fenc + fencstride));
+ T02 = _mm_load_si128((__m128i*)(fenc + (2) * fencstride));
+ T03 = _mm_load_si128((__m128i*)(fenc + (3) * fencstride));
+
+ T10 = _mm_load_si128((__m128i*)(fref));
+ T11 = _mm_load_si128((__m128i*)(fref + frefstride));
+ T12 = _mm_load_si128((__m128i*)(fref + (2) * frefstride));
+ T13 = _mm_load_si128((__m128i*)(fref + (3) * frefstride));
+
+ T20 = _mm_sad_epu8(T00, T10);
+ T21 = _mm_sad_epu8(T01, T11);
+ T22 = _mm_sad_epu8(T02, T12);
+ T23 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi16(sum0, T20);
+ sum0 = _mm_add_epi16(sum0, T21);
+ sum0 = _mm_add_epi16(sum0, T22);
+ sum0 = _mm_add_epi16(sum0, T23);
+
+ T00 = _mm_loadl_epi64((__m128i*)(fenc + 16));
+ T01 = _mm_loadl_epi64((__m128i*)(fenc + (fencstride + 16)));
+ T01 = _mm_unpacklo_epi64(T00, T01);
+
+ T02 = _mm_loadl_epi64((__m128i*)(fenc + ((2) * fencstride) + 16));
+ T03 = _mm_loadl_epi64((__m128i*)(fenc + ((3) * fencstride) + 16));
+ T03 = _mm_unpacklo_epi64(T02, T03);
+
+ T10 = _mm_loadl_epi64((__m128i*)(fref + ((0) * frefstride) + 16));
+ T11 = _mm_loadl_epi64((__m128i*)(fref + ((1) * frefstride) + 16));
+ T11 = _mm_unpacklo_epi64(T10, T11);
+
+ T12 = _mm_loadl_epi64((__m128i*)(fref + ((2) * frefstride) + 16));
+ T13 = _mm_loadl_epi64((__m128i*)(fref + ((3) * frefstride) + 16));
+ T13 = _mm_unpacklo_epi64(T12, T13);
+
+ T20 = _mm_setzero_si128();
+ T21 = _mm_setzero_si128();
+
+ T20 = _mm_sad_epu8(T01, T11);
+ T21 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi32(sum0, T20);
+ sum0 = _mm_add_epi32(sum0, T21);
+
+ T00 = _mm_load_si128((__m128i*)(fenc + (4) * fencstride));
+ T01 = _mm_load_si128((__m128i*)(fenc + (5) * fencstride));
+ T02 = _mm_load_si128((__m128i*)(fenc + (6) * fencstride));
+ T03 = _mm_load_si128((__m128i*)(fenc + (7) * fencstride));
+
+ T10 = _mm_load_si128((__m128i*)(fref + (4) * frefstride));
+ T11 = _mm_load_si128((__m128i*)(fref + (5) * frefstride));
+ T12 = _mm_load_si128((__m128i*)(fref + (6) * frefstride));
+ T13 = _mm_load_si128((__m128i*)(fref + (7) * frefstride));
+
+ T20 = _mm_sad_epu8(T00, T10);
+ T21 = _mm_sad_epu8(T01, T11);
+ T22 = _mm_sad_epu8(T02, T12);
+ T23 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi16(sum0, T20);
+ sum0 = _mm_add_epi16(sum0, T21);
+ sum0 = _mm_add_epi16(sum0, T22);
+ sum0 = _mm_add_epi16(sum0, T23);
+
+ T00 = _mm_loadl_epi64((__m128i*)(fenc + ((4) * fencstride) + 16));
+ T01 = _mm_loadl_epi64((__m128i*)(fenc + ((5) * fencstride) + 16));
+ T01 = _mm_unpacklo_epi64(T00, T01);
+
+ T02 = _mm_loadl_epi64((__m128i*)(fenc + ((6) * fencstride) + 16));
+ T03 = _mm_loadl_epi64((__m128i*)(fenc + ((7) * fencstride) + 16));
+ T03 = _mm_unpacklo_epi64(T02, T03);
+
+ T10 = _mm_loadl_epi64((__m128i*)(fref + ((4) * frefstride) + 16));
+ T11 = _mm_loadl_epi64((__m128i*)(fref + ((5) * frefstride) + 16));
+ T11 = _mm_unpacklo_epi64(T10, T11);
+
+ T12 = _mm_loadl_epi64((__m128i*)(fref + ((6) * frefstride) + 16));
+ T13 = _mm_loadl_epi64((__m128i*)(fref + ((7) * frefstride) + 16));
+ T13 = _mm_unpacklo_epi64(T12, T13);
+
+ T20 = _mm_setzero_si128();
+ T21 = _mm_setzero_si128();
+
+ T20 = _mm_sad_epu8(T01, T11);
+ T21 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi32(sum0, T20);
+ sum0 = _mm_add_epi32(sum0, T21);
+ }
+ else if (ly == 16)
+ {
+ T00 = _mm_load_si128((__m128i*)(fenc));
+ T01 = _mm_load_si128((__m128i*)(fenc + fencstride));
+ T02 = _mm_load_si128((__m128i*)(fenc + (2) * fencstride));
+ T03 = _mm_load_si128((__m128i*)(fenc + (3) * fencstride));
+
+ T10 = _mm_load_si128((__m128i*)(fref));
+ T11 = _mm_load_si128((__m128i*)(fref + frefstride));
+ T12 = _mm_load_si128((__m128i*)(fref + (2) * frefstride));
+ T13 = _mm_load_si128((__m128i*)(fref + (3) * frefstride));
+
+ T20 = _mm_sad_epu8(T00, T10);
+ T21 = _mm_sad_epu8(T01, T11);
+ T22 = _mm_sad_epu8(T02, T12);
+ T23 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi16(sum0, T20);
+ sum0 = _mm_add_epi16(sum0, T21);
+ sum0 = _mm_add_epi16(sum0, T22);
+ sum0 = _mm_add_epi16(sum0, T23);
+
+ T00 = _mm_loadl_epi64((__m128i*)(fenc + 16));
+ T01 = _mm_loadl_epi64((__m128i*)(fenc + fencstride + 16));
+ T01 = _mm_unpacklo_epi64(T00, T01);
+
+ T02 = _mm_loadl_epi64((__m128i*)(fenc + ((2) * fencstride) + 16));
+ T03 = _mm_loadl_epi64((__m128i*)(fenc + ((3) * fencstride) + 16));
+ T03 = _mm_unpacklo_epi64(T02, T03);
+
+ T10 = _mm_loadl_epi64((__m128i*)(fref + 16));
+ T11 = _mm_loadl_epi64((__m128i*)(fref + (frefstride + 16)));
+ T11 = _mm_unpacklo_epi64(T10, T11);
+
+ T12 = _mm_loadl_epi64((__m128i*)(fref + ((2) * frefstride) + 16));
+ T13 = _mm_loadl_epi64((__m128i*)(fref + ((3) * frefstride) + 16));
+ T13 = _mm_unpacklo_epi64(T12, T13);
+
+ T20 = _mm_setzero_si128();
+ T21 = _mm_setzero_si128();
+
+ T20 = _mm_sad_epu8(T01, T11);
+ T21 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi32(sum0, T20);
+ sum0 = _mm_add_epi32(sum0, T21);
+
+ T00 = _mm_load_si128((__m128i*)(fenc + (4) * fencstride));
+ T01 = _mm_load_si128((__m128i*)(fenc + (5) * fencstride));
+ T02 = _mm_load_si128((__m128i*)(fenc + (6) * fencstride));
+ T03 = _mm_load_si128((__m128i*)(fenc + (7) * fencstride));
+
+ T10 = _mm_load_si128((__m128i*)(fref + (4) * frefstride));
+ T11 = _mm_load_si128((__m128i*)(fref + (5) * frefstride));
+ T12 = _mm_load_si128((__m128i*)(fref + (6) * frefstride));
+ T13 = _mm_load_si128((__m128i*)(fref + (7) * frefstride));
+
+ T20 = _mm_sad_epu8(T00, T10);
+ T21 = _mm_sad_epu8(T01, T11);
+ T22 = _mm_sad_epu8(T02, T12);
+ T23 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi16(sum0, T20);
+ sum0 = _mm_add_epi16(sum0, T21);
+ sum0 = _mm_add_epi16(sum0, T22);
+ sum0 = _mm_add_epi16(sum0, T23);
+
+ T00 = _mm_loadl_epi64((__m128i*)(fenc + ((4) * fencstride) + 16));
+ T01 = _mm_loadl_epi64((__m128i*)(fenc + ((5) * fencstride) + 16));
+ T01 = _mm_unpacklo_epi64(T00, T01);
+
+ T02 = _mm_loadl_epi64((__m128i*)(fenc + ((6) * fencstride) + 16));
+ T03 = _mm_loadl_epi64((__m128i*)(fenc + ((7) * fencstride) + 16));
+ T03 = _mm_unpacklo_epi64(T02, T03);
+
+ T10 = _mm_loadl_epi64((__m128i*)(fref + ((4) * frefstride) + 16));
+ T11 = _mm_loadl_epi64((__m128i*)(fref + ((5) * frefstride) + 16));
+ T11 = _mm_unpacklo_epi64(T10, T11);
+
+ T12 = _mm_loadl_epi64((__m128i*)(fref + ((6) * frefstride) + 16));
+ T13 = _mm_loadl_epi64((__m128i*)(fref + ((7) * frefstride) + 16));
+ T13 = _mm_unpacklo_epi64(T12, T13);
+
+ T20 = _mm_setzero_si128();
+ T21 = _mm_setzero_si128();
+
+ T20 = _mm_sad_epu8(T01, T11);
+ T21 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi32(sum0, T20);
+ sum0 = _mm_add_epi32(sum0, T21);
+
+ T00 = _mm_load_si128((__m128i*)(fenc + (8) * fencstride));
+ T01 = _mm_load_si128((__m128i*)(fenc + (9) * fencstride));
+ T02 = _mm_load_si128((__m128i*)(fenc + (10) * fencstride));
+ T03 = _mm_load_si128((__m128i*)(fenc + (11) * fencstride));
+
+ T10 = _mm_load_si128((__m128i*)(fref + (8) * frefstride));
+ T11 = _mm_load_si128((__m128i*)(fref + (9) * frefstride));
+ T12 = _mm_load_si128((__m128i*)(fref + (10) * frefstride));
+ T13 = _mm_load_si128((__m128i*)(fref + (11) * frefstride));
+
+ T20 = _mm_sad_epu8(T00, T10);
+ T21 = _mm_sad_epu8(T01, T11);
+ T22 = _mm_sad_epu8(T02, T12);
+ T23 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi16(sum0, T20);
+ sum0 = _mm_add_epi16(sum0, T21);
+ sum0 = _mm_add_epi16(sum0, T22);
+ sum0 = _mm_add_epi16(sum0, T23);
+
+ T00 = _mm_loadl_epi64((__m128i*)(fenc + ((8) * fencstride) + 16));
+ T01 = _mm_loadl_epi64((__m128i*)(fenc + ((9) * fencstride) + 16));
+ T01 = _mm_unpacklo_epi64(T00, T01);
+
+ T02 = _mm_loadl_epi64((__m128i*)(fenc + ((10) * fencstride) + 16));
+ T03 = _mm_loadl_epi64((__m128i*)(fenc + ((11) * fencstride) + 16));
+ T03 = _mm_unpacklo_epi64(T02, T03);
+
+ T10 = _mm_loadl_epi64((__m128i*)(fref + ((8) * frefstride) + 16));
+ T11 = _mm_loadl_epi64((__m128i*)(fref + ((9) * frefstride) + 16));
+ T11 = _mm_unpacklo_epi64(T10, T11);
+
+ T12 = _mm_loadl_epi64((__m128i*)(fref + ((10) * frefstride) + 16));
+ T13 = _mm_loadl_epi64((__m128i*)(fref + ((11) * frefstride) + 16));
+ T13 = _mm_unpacklo_epi64(T12, T13);
+
+ T20 = _mm_setzero_si128();
+ T21 = _mm_setzero_si128();
+
+ T20 = _mm_sad_epu8(T01, T11);
+ T21 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi32(sum0, T20);
+ sum0 = _mm_add_epi32(sum0, T21);
+
+ T00 = _mm_load_si128((__m128i*)(fenc + (12) * fencstride));
+ T01 = _mm_load_si128((__m128i*)(fenc + (13) * fencstride));
+ T02 = _mm_load_si128((__m128i*)(fenc + (14) * fencstride));
+ T03 = _mm_load_si128((__m128i*)(fenc + (15) * fencstride));
+
+ T10 = _mm_load_si128((__m128i*)(fref + (12) * frefstride));
+ T11 = _mm_load_si128((__m128i*)(fref + (13) * frefstride));
+ T12 = _mm_load_si128((__m128i*)(fref + (14) * frefstride));
+ T13 = _mm_load_si128((__m128i*)(fref + (15) * frefstride));
+
+ T20 = _mm_sad_epu8(T00, T10);
+ T21 = _mm_sad_epu8(T01, T11);
+ T22 = _mm_sad_epu8(T02, T12);
+ T23 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi16(sum0, T20);
+ sum0 = _mm_add_epi16(sum0, T21);
+ sum0 = _mm_add_epi16(sum0, T22);
+ sum0 = _mm_add_epi16(sum0, T23);
+
+ T00 = _mm_loadl_epi64((__m128i*)(fenc + ((12) * fencstride) + 16));
+ T01 = _mm_loadl_epi64((__m128i*)(fenc + ((13) * fencstride) + 16));
+ T01 = _mm_unpacklo_epi64(T00, T01);
+
+ T02 = _mm_loadl_epi64((__m128i*)(fenc + ((14) * fencstride) + 16));
+ T03 = _mm_loadl_epi64((__m128i*)(fenc + ((15) * fencstride) + 16));
+ T03 = _mm_unpacklo_epi64(T02, T03);
+
+ T10 = _mm_loadl_epi64((__m128i*)(fref + ((12) * frefstride) + 16));
+ T11 = _mm_loadl_epi64((__m128i*)(fref + ((13) * frefstride) + 16));
+ T11 = _mm_unpacklo_epi64(T10, T11);
+
+ T12 = _mm_loadl_epi64((__m128i*)(fref + ((14) * frefstride) + 16));
+ T13 = _mm_loadl_epi64((__m128i*)(fref + ((15) * frefstride) + 16));
+ T13 = _mm_unpacklo_epi64(T12, T13);
+
+ T20 = _mm_setzero_si128();
+ T21 = _mm_setzero_si128();
+
+ T20 = _mm_sad_epu8(T01, T11);
+ T21 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi32(sum0, T20);
+ sum0 = _mm_add_epi32(sum0, T21);
+ }
+ else if ((ly % 8) == 0)
+ {
+ for (int i = 0; i < ly; i += 8)
+ {
+ T00 = _mm_load_si128((__m128i*)(fenc + (i) * fencstride));
+ T01 = _mm_load_si128((__m128i*)(fenc + (i + 1) * fencstride));
+ T02 = _mm_load_si128((__m128i*)(fenc + (i + 2) * fencstride));
+ T03 = _mm_load_si128((__m128i*)(fenc + (i + 3) * fencstride));
+
+ T10 = _mm_load_si128((__m128i*)(fref + (i) * frefstride));
+ T11 = _mm_load_si128((__m128i*)(fref + (i + 1) * frefstride));
+ T12 = _mm_load_si128((__m128i*)(fref + (i + 2) * frefstride));
+ T13 = _mm_load_si128((__m128i*)(fref + (i + 3) * frefstride));
+
+ T20 = _mm_sad_epu8(T00, T10);
+ T21 = _mm_sad_epu8(T01, T11);
+ T22 = _mm_sad_epu8(T02, T12);
+ T23 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi32(sum0, T20);
+ sum0 = _mm_add_epi32(sum0, T21);
+ sum0 = _mm_add_epi32(sum0, T22);
+ sum0 = _mm_add_epi32(sum0, T23);
+
+ T00 = _mm_loadl_epi64((__m128i*)(fenc + ((i) * fencstride) + 16));
+ T01 = _mm_loadl_epi64((__m128i*)(fenc + ((i + 1) * fencstride) + 16));
+ T01 = _mm_unpacklo_epi64(T00, T01);
+
+ T02 = _mm_loadl_epi64((__m128i*)(fenc + ((i + 2) * fencstride) + 16));
+ T03 = _mm_loadl_epi64((__m128i*)(fenc + ((i + 3) * fencstride) + 16));
+ T03 = _mm_unpacklo_epi64(T02, T03);
+
+ T10 = _mm_loadl_epi64((__m128i*)(fref + ((i) * frefstride) + 16));
+ T11 = _mm_loadl_epi64((__m128i*)(fref + ((i + 1) * frefstride) + 16));
+ T11 = _mm_unpacklo_epi64(T10, T11);
+
+ T12 = _mm_loadl_epi64((__m128i*)(fref + ((i + 2) * frefstride) + 16));
+ T13 = _mm_loadl_epi64((__m128i*)(fref + ((i + 3) * frefstride) + 16));
+ T13 = _mm_unpacklo_epi64(T12, T13);
+
+ T20 = _mm_setzero_si128();
+ T21 = _mm_setzero_si128();
+
+ T20 = _mm_sad_epu8(T01, T11);
+ T21 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi32(sum0, T20);
+ sum0 = _mm_add_epi32(sum0, T21);
+
+ T00 = _mm_load_si128((__m128i*)(fenc + (i + 4) * fencstride));
+ T01 = _mm_load_si128((__m128i*)(fenc + (i + 5) * fencstride));
+ T02 = _mm_load_si128((__m128i*)(fenc + (i + 6) * fencstride));
+ T03 = _mm_load_si128((__m128i*)(fenc + (i + 7) * fencstride));
+
+ T10 = _mm_load_si128((__m128i*)(fref + (i + 4) * frefstride));
+ T11 = _mm_load_si128((__m128i*)(fref + (i + 5) * frefstride));
+ T12 = _mm_load_si128((__m128i*)(fref + (i + 6) * frefstride));
+ T13 = _mm_load_si128((__m128i*)(fref + (i + 7) * frefstride));
+
+ T20 = _mm_sad_epu8(T00, T10);
+ T21 = _mm_sad_epu8(T01, T11);
+ T22 = _mm_sad_epu8(T02, T12);
+ T23 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi32(sum0, T20);
+ sum0 = _mm_add_epi32(sum0, T21);
+ sum0 = _mm_add_epi32(sum0, T22);
+ sum0 = _mm_add_epi32(sum0, T23);
+
+ T00 = _mm_loadl_epi64((__m128i*)(fenc + ((i + 4) * fencstride) + 16));
+ T01 = _mm_loadl_epi64((__m128i*)(fenc + ((i + 5) * fencstride) + 16));
+ T01 = _mm_unpacklo_epi64(T00, T01);
+
+ T02 = _mm_loadl_epi64((__m128i*)(fenc + ((i + 6) * fencstride) + 16));
+ T03 = _mm_loadl_epi64((__m128i*)(fenc + ((i + 7) * fencstride) + 16));
+ T03 = _mm_unpacklo_epi64(T02, T03);
+
+ T10 = _mm_loadl_epi64((__m128i*)(fref + ((i + 4) * frefstride) + 16));
+ T11 = _mm_loadl_epi64((__m128i*)(fref + ((i + 5) * frefstride) + 16));
+ T11 = _mm_unpacklo_epi64(T10, T11);
+
+ T12 = _mm_loadl_epi64((__m128i*)(fref + ((i + 6) * frefstride) + 16));
+ T13 = _mm_loadl_epi64((__m128i*)(fref + ((i + 7) * frefstride) + 16));
+ T13 = _mm_unpacklo_epi64(T12, T13);
+
+ T20 = _mm_setzero_si128();
+ T21 = _mm_setzero_si128();
+
+ T20 = _mm_sad_epu8(T01, T11);
+ T21 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi32(sum0, T20);
+ sum0 = _mm_add_epi32(sum0, T21);
+ }
+ }
+ else
+ {
+ for (int i = 0; i < ly; i += 4)
+ {
+ T00 = _mm_load_si128((__m128i*)(fenc + (i) * fencstride));
+ T01 = _mm_load_si128((__m128i*)(fenc + (i + 1) * fencstride));
+ T02 = _mm_load_si128((__m128i*)(fenc + (i + 2) * fencstride));
+ T03 = _mm_load_si128((__m128i*)(fenc + (i + 3) * fencstride));
+
+ T10 = _mm_load_si128((__m128i*)(fref + (i) * frefstride));
+ T11 = _mm_load_si128((__m128i*)(fref + (i + 1) * frefstride));
+ T12 = _mm_load_si128((__m128i*)(fref + (i + 2) * frefstride));
+ T13 = _mm_load_si128((__m128i*)(fref + (i + 3) * frefstride));
+
+ T20 = _mm_sad_epu8(T00, T10);
+ T21 = _mm_sad_epu8(T01, T11);
+ T22 = _mm_sad_epu8(T02, T12);
+ T23 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi16(sum0, T20);
+ sum0 = _mm_add_epi16(sum0, T21);
+ sum0 = _mm_add_epi16(sum0, T22);
+ sum0 = _mm_add_epi16(sum0, T23);
+
+ T00 = _mm_loadl_epi64((__m128i*)(fenc + ((i) * fencstride) + 16));
+ T01 = _mm_loadl_epi64((__m128i*)(fenc + ((i + 1) * fencstride) + 16));
+ T01 = _mm_unpacklo_epi64(T00, T01);
+
+ T02 = _mm_loadl_epi64((__m128i*)(fenc + ((i + 2) * fencstride) + 16));
+ T03 = _mm_loadl_epi64((__m128i*)(fenc + ((i + 3) * fencstride) + 16));
+ T03 = _mm_unpacklo_epi64(T02, T03);
+
+ T10 = _mm_loadl_epi64((__m128i*)(fref + ((i) * frefstride) + 16));
+ T11 = _mm_loadl_epi64((__m128i*)(fref + ((i + 1) * frefstride) + 16));
+ T11 = _mm_unpacklo_epi64(T10, T11);
+
+ T12 = _mm_loadl_epi64((__m128i*)(fref + ((i + 2) * frefstride) + 16));
+ T13 = _mm_loadl_epi64((__m128i*)(fref + ((i + 3) * frefstride) + 16));
+ T13 = _mm_unpacklo_epi64(T12, T13);
+
+ T20 = _mm_setzero_si128();
+ T21 = _mm_setzero_si128();
+
+ T20 = _mm_sad_epu8(T01, T11);
+ T21 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi32(sum0, T20);
+ sum0 = _mm_add_epi32(sum0, T21);
+ }
+ }
+ sum1 = _mm_shuffle_epi32(sum0, 2);
+ sum0 = _mm_add_epi32(sum0, sum1);
+
+ return _mm_cvtsi128_si32(sum0);
}
template<int size>
@@ -1196,30 +1899,437 @@
sad.addSumAbsDiff(m1, n1);
}
-template<int ly>
-int sad_32(pixel * fenc, intptr_t fencstride, pixel * fref, intptr_t frefstride)
-{
- Vec4i sum(0);
- Vec8us sad;
- int max_iterators = (ly >> 2) << 2;
- int row;
- if (ly == 4)
- {
- sad = 0;
- unrollFunc_32<4>(fenc, fencstride, fref, frefstride, sad);
- sum += extend_low(sad) + extend_high(sad);
- return horizontal_add(sum);
- }
- for (row = 0; row < max_iterators; row += 4)
- {
- sad = 0;
- unrollFunc_32<4>(fenc, fencstride, fref, frefstride, sad);
- sum += extend_low(sad) + extend_high(sad);
- fenc += fencstride * 4;
- fref += frefstride * 4;
- }
-
- return horizontal_add(sum);
+template<int ly>
+int sad_32(pixel * fenc, intptr_t fencstride, pixel * fref, intptr_t frefstride)
+{
+ assert((ly % 4) == 0);
+
+ __m128i sum0 = _mm_setzero_si128();
+ __m128i sum1 = _mm_setzero_si128();
+ __m128i T00, T01, T02, T03;
+ __m128i T10, T11, T12, T13;
+ __m128i T20, T21, T22, T23;
+
+ if (ly == 4)
+ {
+ T00 = _mm_load_si128((__m128i*)(fenc));
+ T01 = _mm_load_si128((__m128i*)(fenc + fencstride));
+ T02 = _mm_load_si128((__m128i*)(fenc + (2) * fencstride));
+ T03 = _mm_load_si128((__m128i*)(fenc + (3) * fencstride));
+
+ T10 = _mm_loadu_si128((__m128i*)(fref));
+ T11 = _mm_loadu_si128((__m128i*)(fref + frefstride));
+ T12 = _mm_loadu_si128((__m128i*)(fref + (2) * frefstride));
+ T13 = _mm_loadu_si128((__m128i*)(fref + (3) * frefstride));
+
+ T20 = _mm_sad_epu8(T00, T10);
+ T21 = _mm_sad_epu8(T01, T11);
+ T22 = _mm_sad_epu8(T02, T12);
+ T23 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi32(sum0, T20);
+ sum0 = _mm_add_epi32(sum0, T21);
+ sum0 = _mm_add_epi32(sum0, T22);
+ sum0 = _mm_add_epi32(sum0, T23);
+
+ T00 = _mm_load_si128((__m128i*)(fenc + 16));
+ T01 = _mm_load_si128((__m128i*)(fenc + fencstride + 16));
+ T02 = _mm_load_si128((__m128i*)(fenc + ((2) * fencstride) + 16));
+ T03 = _mm_load_si128((__m128i*)(fenc + ((3) * fencstride) + 16));
+
+ T10 = _mm_loadu_si128((__m128i*)(fref + 16));
+ T11 = _mm_loadu_si128((__m128i*)(fref + frefstride + 16));
+ T12 = _mm_loadu_si128((__m128i*)(fref + ((2) * frefstride) + 16));
+ T13 = _mm_loadu_si128((__m128i*)(fref + ((3) * frefstride) + 16));
+
+ T20 = _mm_sad_epu8(T00, T10);
+ T21 = _mm_sad_epu8(T01, T11);
+ T22 = _mm_sad_epu8(T02, T12);
+ T23 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi32(sum0, T20);
+ sum0 = _mm_add_epi32(sum0, T21);
+ sum0 = _mm_add_epi32(sum0, T22);
+ sum0 = _mm_add_epi32(sum0, T23);
+ }
+ else if (ly == 8)
+ {
+ T00 = _mm_load_si128((__m128i*)(fenc));
+ T01 = _mm_load_si128((__m128i*)(fenc + fencstride));
+ T02 = _mm_load_si128((__m128i*)(fenc + (2) * fencstride));
+ T03 = _mm_load_si128((__m128i*)(fenc + (3) * fencstride));
+
+ T10 = _mm_loadu_si128((__m128i*)(fref));
+ T11 = _mm_loadu_si128((__m128i*)(fref + frefstride));
+ T12 = _mm_loadu_si128((__m128i*)(fref + (2) * frefstride));
+ T13 = _mm_loadu_si128((__m128i*)(fref + (3) * frefstride));
+
+ T20 = _mm_sad_epu8(T00, T10);
+ T21 = _mm_sad_epu8(T01, T11);
+ T22 = _mm_sad_epu8(T02, T12);
+ T23 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi32(sum0, T20);
+ sum0 = _mm_add_epi32(sum0, T21);
+ sum0 = _mm_add_epi32(sum0, T22);
+ sum0 = _mm_add_epi32(sum0, T23);
+
+ T00 = _mm_load_si128((__m128i*)(fenc + 16));
+ T01 = _mm_load_si128((__m128i*)(fenc + fencstride + 16));
+ T02 = _mm_load_si128((__m128i*)(fenc + ((2) * fencstride) + 16));
+ T03 = _mm_load_si128((__m128i*)(fenc + ((3) * fencstride) + 16));
+
+ T10 = _mm_loadu_si128((__m128i*)(fref + 16));
+ T11 = _mm_loadu_si128((__m128i*)(fref + frefstride + 16));
+ T12 = _mm_loadu_si128((__m128i*)(fref + ((2) * frefstride) + 16));
+ T13 = _mm_loadu_si128((__m128i*)(fref + ((3) * frefstride) + 16));
+
+ T20 = _mm_sad_epu8(T00, T10);
+ T21 = _mm_sad_epu8(T01, T11);
+ T22 = _mm_sad_epu8(T02, T12);
+ T23 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi32(sum0, T20);
+ sum0 = _mm_add_epi32(sum0, T21);
+ sum0 = _mm_add_epi32(sum0, T22);
+ sum0 = _mm_add_epi32(sum0, T23);
+
+ T00 = _mm_load_si128((__m128i*)(fenc + (4) * fencstride));
+ T01 = _mm_load_si128((__m128i*)(fenc + (5) * fencstride));
+ T02 = _mm_load_si128((__m128i*)(fenc + (6) * fencstride));
+ T03 = _mm_load_si128((__m128i*)(fenc + (7) * fencstride));
+
+ T10 = _mm_loadu_si128((__m128i*)(fref + (4) * frefstride));
+ T11 = _mm_loadu_si128((__m128i*)(fref + (5) * frefstride));
+ T12 = _mm_loadu_si128((__m128i*)(fref + (6) * frefstride));
+ T13 = _mm_loadu_si128((__m128i*)(fref + (7) * frefstride));
+
+ T20 = _mm_sad_epu8(T00, T10);
+ T21 = _mm_sad_epu8(T01, T11);
+ T22 = _mm_sad_epu8(T02, T12);
+ T23 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi32(sum0, T20);
+ sum0 = _mm_add_epi32(sum0, T21);
+ sum0 = _mm_add_epi32(sum0, T22);
+ sum0 = _mm_add_epi32(sum0, T23);
+
+ T00 = _mm_load_si128((__m128i*)(fenc + ((4) * fencstride) + 16));
+ T01 = _mm_load_si128((__m128i*)(fenc + ((5) * fencstride) + 16));
+ T02 = _mm_load_si128((__m128i*)(fenc + ((6) * fencstride) + 16));
+ T03 = _mm_load_si128((__m128i*)(fenc + ((7) * fencstride) + 16));
+
+ T10 = _mm_loadu_si128((__m128i*)(fref + ((4) * frefstride) + 16));
+ T11 = _mm_loadu_si128((__m128i*)(fref + ((5) * frefstride) + 16));
+ T12 = _mm_loadu_si128((__m128i*)(fref + ((6) * frefstride) + 16));
+ T13 = _mm_loadu_si128((__m128i*)(fref + ((7) * frefstride) + 16));
+
+ T20 = _mm_sad_epu8(T00, T10);
+ T21 = _mm_sad_epu8(T01, T11);
+ T22 = _mm_sad_epu8(T02, T12);
+ T23 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi32(sum0, T20);
+ sum0 = _mm_add_epi32(sum0, T21);
+ sum0 = _mm_add_epi32(sum0, T22);
+ sum0 = _mm_add_epi32(sum0, T23);
+ }
+ else if (ly == 16)
+ {
+ T00 = _mm_load_si128((__m128i*)(fenc));
+ T01 = _mm_load_si128((__m128i*)(fenc + fencstride));
+ T02 = _mm_load_si128((__m128i*)(fenc + (2) * fencstride));
+ T03 = _mm_load_si128((__m128i*)(fenc + (3) * fencstride));
+
+ T10 = _mm_loadu_si128((__m128i*)(fref));
+ T11 = _mm_loadu_si128((__m128i*)(fref + frefstride));
+ T12 = _mm_loadu_si128((__m128i*)(fref + (2) * frefstride));
+ T13 = _mm_loadu_si128((__m128i*)(fref + (3) * frefstride));
+
+ T20 = _mm_sad_epu8(T00, T10);
+ T21 = _mm_sad_epu8(T01, T11);
+ T22 = _mm_sad_epu8(T02, T12);
+ T23 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi32(sum0, T20);
+ sum0 = _mm_add_epi32(sum0, T21);
+ sum0 = _mm_add_epi32(sum0, T22);
+ sum0 = _mm_add_epi32(sum0, T23);
+
+ T00 = _mm_load_si128((__m128i*)(fenc + 16));
+ T01 = _mm_load_si128((__m128i*)(fenc + fencstride + 16));
+ T02 = _mm_load_si128((__m128i*)(fenc + ((2) * fencstride) + 16));
+ T03 = _mm_load_si128((__m128i*)(fenc + ((3) * fencstride) + 16));
+
+ T10 = _mm_loadu_si128((__m128i*)(fref + 16));
+ T11 = _mm_loadu_si128((__m128i*)(fref + frefstride + 16));
+ T12 = _mm_loadu_si128((__m128i*)(fref + ((2) * frefstride) + 16));
+ T13 = _mm_loadu_si128((__m128i*)(fref + ((3) * frefstride) + 16));
+
+ T20 = _mm_sad_epu8(T00, T10);
+ T21 = _mm_sad_epu8(T01, T11);
+ T22 = _mm_sad_epu8(T02, T12);
+ T23 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi32(sum0, T20);
+ sum0 = _mm_add_epi32(sum0, T21);
+ sum0 = _mm_add_epi32(sum0, T22);
+ sum0 = _mm_add_epi32(sum0, T23);
+
+ T00 = _mm_load_si128((__m128i*)(fenc + (4) * fencstride));
+ T01 = _mm_load_si128((__m128i*)(fenc + (5) * fencstride));
+ T02 = _mm_load_si128((__m128i*)(fenc + (6) * fencstride));
+ T03 = _mm_load_si128((__m128i*)(fenc + (7) * fencstride));
+
+ T10 = _mm_loadu_si128((__m128i*)(fref + (4) * frefstride));
+ T11 = _mm_loadu_si128((__m128i*)(fref + (5) * frefstride));
+ T12 = _mm_loadu_si128((__m128i*)(fref + (6) * frefstride));
+ T13 = _mm_loadu_si128((__m128i*)(fref + (7) * frefstride));
+
+ T20 = _mm_sad_epu8(T00, T10);
+ T21 = _mm_sad_epu8(T01, T11);
+ T22 = _mm_sad_epu8(T02, T12);
+ T23 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi32(sum0, T20);
+ sum0 = _mm_add_epi32(sum0, T21);
+ sum0 = _mm_add_epi32(sum0, T22);
+ sum0 = _mm_add_epi32(sum0, T23);
+
+ T00 = _mm_load_si128((__m128i*)(fenc + ((4) * fencstride) + 16));
+ T01 = _mm_load_si128((__m128i*)(fenc + ((5) * fencstride) + 16));
+ T02 = _mm_load_si128((__m128i*)(fenc + ((6) * fencstride) + 16));
+ T03 = _mm_load_si128((__m128i*)(fenc + ((7) * fencstride) + 16));
+
+ T10 = _mm_loadu_si128((__m128i*)(fref + ((4) * frefstride) + 16));
+ T11 = _mm_loadu_si128((__m128i*)(fref + ((5) * frefstride) + 16));
+ T12 = _mm_loadu_si128((__m128i*)(fref + ((6) * frefstride) + 16));
+ T13 = _mm_loadu_si128((__m128i*)(fref + ((7) * frefstride) + 16));
+
+ T20 = _mm_sad_epu8(T00, T10);
+ T21 = _mm_sad_epu8(T01, T11);
+ T22 = _mm_sad_epu8(T02, T12);
+ T23 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi32(sum0, T20);
+ sum0 = _mm_add_epi32(sum0, T21);
+ sum0 = _mm_add_epi32(sum0, T22);
+ sum0 = _mm_add_epi32(sum0, T23);
+
+ T00 = _mm_load_si128((__m128i*)(fenc + (8) * fencstride));
+ T01 = _mm_load_si128((__m128i*)(fenc + (9) * fencstride));
+ T02 = _mm_load_si128((__m128i*)(fenc + (10) * fencstride));
+ T03 = _mm_load_si128((__m128i*)(fenc + (11) * fencstride));
+
+ T10 = _mm_loadu_si128((__m128i*)(fref + (8) * frefstride));
+ T11 = _mm_loadu_si128((__m128i*)(fref + (9) * frefstride));
+ T12 = _mm_loadu_si128((__m128i*)(fref + (10) * frefstride));
+ T13 = _mm_loadu_si128((__m128i*)(fref + (11) * frefstride));
+
+ T20 = _mm_sad_epu8(T00, T10);
+ T21 = _mm_sad_epu8(T01, T11);
+ T22 = _mm_sad_epu8(T02, T12);
+ T23 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi32(sum0, T20);
+ sum0 = _mm_add_epi32(sum0, T21);
+ sum0 = _mm_add_epi32(sum0, T22);
+ sum0 = _mm_add_epi32(sum0, T23);
+
+ T00 = _mm_load_si128((__m128i*)(fenc + ((8) * fencstride) + 16));
+ T01 = _mm_load_si128((__m128i*)(fenc + ((9) * fencstride) + 16));
+ T02 = _mm_load_si128((__m128i*)(fenc + ((10) * fencstride) + 16));
+ T03 = _mm_load_si128((__m128i*)(fenc + ((11) * fencstride) + 16));
+
+ T10 = _mm_loadu_si128((__m128i*)(fref + ((8) * frefstride) + 16));
+ T11 = _mm_loadu_si128((__m128i*)(fref + ((9) * frefstride) + 16));
+ T12 = _mm_loadu_si128((__m128i*)(fref + ((10) * frefstride) + 16));
+ T13 = _mm_loadu_si128((__m128i*)(fref + ((11) * frefstride) + 16));
+
+ T20 = _mm_sad_epu8(T00, T10);
+ T21 = _mm_sad_epu8(T01, T11);
+ T22 = _mm_sad_epu8(T02, T12);
+ T23 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi16(sum0, T20);
+ sum0 = _mm_add_epi16(sum0, T21);
+ sum0 = _mm_add_epi16(sum0, T22);
+ sum0 = _mm_add_epi16(sum0, T23);
+ T00 = _mm_load_si128((__m128i*)(fenc + (12) * fencstride));
+ T01 = _mm_load_si128((__m128i*)(fenc + (13) * fencstride));
+ T02 = _mm_load_si128((__m128i*)(fenc + (14) * fencstride));
+ T03 = _mm_load_si128((__m128i*)(fenc + (15) * fencstride));
+
+ T10 = _mm_loadu_si128((__m128i*)(fref + (12) * frefstride));
+ T11 = _mm_loadu_si128((__m128i*)(fref + (13) * frefstride));
+ T12 = _mm_loadu_si128((__m128i*)(fref + (14) * frefstride));
+ T13 = _mm_loadu_si128((__m128i*)(fref + (15) * frefstride));
+
+ T20 = _mm_sad_epu8(T00, T10);
+ T21 = _mm_sad_epu8(T01, T11);
+ T22 = _mm_sad_epu8(T02, T12);
+ T23 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi32(sum0, T20);
+ sum0 = _mm_add_epi32(sum0, T21);
+ sum0 = _mm_add_epi32(sum0, T22);
+ sum0 = _mm_add_epi32(sum0, T23);
+
+ T00 = _mm_load_si128((__m128i*)(fenc + ((12) * fencstride) + 16));
+ T01 = _mm_load_si128((__m128i*)(fenc + ((13) * fencstride) + 16));
+ T02 = _mm_load_si128((__m128i*)(fenc + ((14) * fencstride) + 16));
+ T03 = _mm_load_si128((__m128i*)(fenc + ((15) * fencstride) + 16));
+
+ T10 = _mm_loadu_si128((__m128i*)(fref + ((12) * frefstride) + 16));
+ T11 = _mm_loadu_si128((__m128i*)(fref + ((13) * frefstride) + 16));
+ T12 = _mm_loadu_si128((__m128i*)(fref + ((14) * frefstride) + 16));
+ T13 = _mm_loadu_si128((__m128i*)(fref + ((15) * frefstride) + 16));
+
+ T20 = _mm_sad_epu8(T00, T10);
+ T21 = _mm_sad_epu8(T01, T11);
+ T22 = _mm_sad_epu8(T02, T12);
+ T23 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi32(sum0, T20);
+ sum0 = _mm_add_epi32(sum0, T21);
+ sum0 = _mm_add_epi32(sum0, T22);
+ sum0 = _mm_add_epi32(sum0, T23);
+ }
+ else if ((ly % 8) == 0)
+ {
+ for (int i = 0; i < ly; i += 8)
+ {
+ T00 = _mm_load_si128((__m128i*)(fenc + (i) * fencstride));
+ T01 = _mm_load_si128((__m128i*)(fenc + (i + 1) * fencstride));
+ T02 = _mm_load_si128((__m128i*)(fenc + (i + 2) * fencstride));
+ T03 = _mm_load_si128((__m128i*)(fenc + (i + 3) * fencstride));
+
+ T10 = _mm_loadu_si128((__m128i*)(fref + (i) * frefstride));
+ T11 = _mm_loadu_si128((__m128i*)(fref + (i + 1) * frefstride));
+ T12 = _mm_loadu_si128((__m128i*)(fref + (i + 2) * frefstride));
+ T13 = _mm_loadu_si128((__m128i*)(fref + (i + 3) * frefstride));
+
+ T20 = _mm_sad_epu8(T00, T10);
+ T21 = _mm_sad_epu8(T01, T11);
+ T22 = _mm_sad_epu8(T02, T12);
+ T23 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi32(sum0, T20);
+ sum0 = _mm_add_epi32(sum0, T21);
+ sum0 = _mm_add_epi32(sum0, T22);
+ sum0 = _mm_add_epi32(sum0, T23);
+
+ T00 = _mm_load_si128((__m128i*)(fenc + ((i) * fencstride) + 16));
+ T01 = _mm_load_si128((__m128i*)(fenc + ((i + 1) * fencstride) + 16));
+ T02 = _mm_load_si128((__m128i*)(fenc + ((i + 2) * fencstride) + 16));
+ T03 = _mm_load_si128((__m128i*)(fenc + ((i + 3) * fencstride) + 16));
+
+ T10 = _mm_loadu_si128((__m128i*)(fref + ((i) * frefstride) + 16));
+ T11 = _mm_loadu_si128((__m128i*)(fref + ((i + 1) * frefstride) + 16));
+ T12 = _mm_loadu_si128((__m128i*)(fref + ((i + 2) * frefstride) + 16));
+ T13 = _mm_loadu_si128((__m128i*)(fref + ((i + 3) * frefstride) + 16));
+
+ T20 = _mm_sad_epu8(T00, T10);
+ T21 = _mm_sad_epu8(T01, T11);
+ T22 = _mm_sad_epu8(T02, T12);
+ T23 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi32(sum0, T20);
+ sum0 = _mm_add_epi32(sum0, T21);
+ sum0 = _mm_add_epi32(sum0, T22);
+ sum0 = _mm_add_epi32(sum0, T23);
+
+ T00 = _mm_load_si128((__m128i*)(fenc + (i + 4) * fencstride));
+ T01 = _mm_load_si128((__m128i*)(fenc + (i + 5) * fencstride));
+ T02 = _mm_load_si128((__m128i*)(fenc + (i + 6) * fencstride));
+ T03 = _mm_load_si128((__m128i*)(fenc + (i + 7) * fencstride));
+
+ T10 = _mm_loadu_si128((__m128i*)(fref + (i + 4) * frefstride));
+ T11 = _mm_loadu_si128((__m128i*)(fref + (i + 5) * frefstride));
+ T12 = _mm_loadu_si128((__m128i*)(fref + (i + 6) * frefstride));
+ T13 = _mm_loadu_si128((__m128i*)(fref + (i + 7) * frefstride));
+
+ T20 = _mm_sad_epu8(T00, T10);
+ T21 = _mm_sad_epu8(T01, T11);
+ T22 = _mm_sad_epu8(T02, T12);
+ T23 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi32(sum0, T20);
+ sum0 = _mm_add_epi32(sum0, T21);
+ sum0 = _mm_add_epi32(sum0, T22);
+ sum0 = _mm_add_epi32(sum0, T23);
+
+ T00 = _mm_load_si128((__m128i*)(fenc + ((i + 4) * fencstride) + 16));
+ T01 = _mm_load_si128((__m128i*)(fenc + ((i + 5) * fencstride) + 16));
+ T02 = _mm_load_si128((__m128i*)(fenc + ((i + 6) * fencstride) + 16));
+ T03 = _mm_load_si128((__m128i*)(fenc + ((i + 7) * fencstride) + 16));
+
+ T10 = _mm_loadu_si128((__m128i*)(fref + ((i + 4) * frefstride) + 16));
+ T11 = _mm_loadu_si128((__m128i*)(fref + ((i + 5) * frefstride) + 16));
+ T12 = _mm_loadu_si128((__m128i*)(fref + ((i + 6) * frefstride) + 16));
+ T13 = _mm_loadu_si128((__m128i*)(fref + ((i + 7) * frefstride) + 16));
+
+ T20 = _mm_sad_epu8(T00, T10);
+ T21 = _mm_sad_epu8(T01, T11);
+ T22 = _mm_sad_epu8(T02, T12);
+ T23 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi32(sum0, T20);
+ sum0 = _mm_add_epi32(sum0, T21);
+ sum0 = _mm_add_epi32(sum0, T22);
+ sum0 = _mm_add_epi32(sum0, T23);
+ }
+ }
+ else if ((ly % 4) == 0)
+ {
+ for (int i = 0; i < ly; i += 4)
+ {
+ T00 = _mm_load_si128((__m128i*)(fenc + (i) * fencstride));
+ T01 = _mm_load_si128((__m128i*)(fenc + (i + 1) * fencstride));
+ T02 = _mm_load_si128((__m128i*)(fenc + (i + 2) * fencstride));
+ T03 = _mm_load_si128((__m128i*)(fenc + (i + 3) * fencstride));
+
+ T10 = _mm_loadu_si128((__m128i*)(fref + (i) * frefstride));
+ T11 = _mm_loadu_si128((__m128i*)(fref + (i + 1) * frefstride));
+ T12 = _mm_loadu_si128((__m128i*)(fref + (i + 2) * frefstride));
+ T13 = _mm_loadu_si128((__m128i*)(fref + (i + 3) * frefstride));
+
+ T20 = _mm_sad_epu8(T00, T10);
+ T21 = _mm_sad_epu8(T01, T11);
+ T22 = _mm_sad_epu8(T02, T12);
+ T23 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi32(sum0, T20);
+ sum0 = _mm_add_epi32(sum0, T21);
+ sum0 = _mm_add_epi32(sum0, T22);
+ sum0 = _mm_add_epi32(sum0, T23);
+
+ T00 = _mm_load_si128((__m128i*)(fenc + ((i) * fencstride) + 16));
+ T01 = _mm_load_si128((__m128i*)(fenc + ((i + 1) * fencstride) + 16));
+ T02 = _mm_load_si128((__m128i*)(fenc + ((i + 2) * fencstride) + 16));
+ T03 = _mm_load_si128((__m128i*)(fenc + ((i + 3) * fencstride) + 16));
+
+ T10 = _mm_loadu_si128((__m128i*)(fref + ((i + 0) * frefstride) + 16));
+ T11 = _mm_loadu_si128((__m128i*)(fref + ((i + 1) * frefstride) + 16));
+ T12 = _mm_loadu_si128((__m128i*)(fref + ((i + 2) * frefstride) + 16));
+ T13 = _mm_loadu_si128((__m128i*)(fref + ((i + 3) * frefstride) + 16));
+
+ T20 = _mm_sad_epu8(T00, T10);
+ T21 = _mm_sad_epu8(T01, T11);
+ T22 = _mm_sad_epu8(T02, T12);
+ T23 = _mm_sad_epu8(T03, T13);
+
+ sum0 = _mm_add_epi32(sum0, T20);
+ sum0 = _mm_add_epi32(sum0, T21);
+ sum0 = _mm_add_epi32(sum0, T22);
+ sum0 = _mm_add_epi32(sum0, T23);
+ }
+ }
+
+ sum1 = _mm_shuffle_epi32(sum0, 2);
+ sum0 = _mm_add_epi32(sum0, sum1);
+
+ return _mm_cvtsi128_si32(sum0);
}
The above comments apply to all three functions. Also, please submit no more than one function per patch.
template<int size>
Regards,
Praveen
_______________________________________________
x265-devel mailing list
x265-devel at videolan.org
https://mailman.videolan.org/listinfo/x265-devel