[x265] [PATCH] sse.inc: Replace sse_ss48 vector class function with intrinsics
Dnyaneshwar Gorade dnyaneshwar at multicorewareinc.com
Wed Oct 9 13:59:49 CEST 2013
# HG changeset patch
# User Dnyaneshwar Gorade <dnyaneshwar at multicorewareinc.com>
# Date 1381319957 -19800
# Wed Oct 09 17:29:17 2013 +0530
# Node ID 933485037b324449660bb74192e1b889d861d08e
# Parent cb9f29e64e64403a3d4c9c01fa55736b867f720e
sse.inc: Replace sse_ss48 vector class function with intrinsics.
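
For reference, sse_ss48 computes the sum of squared differences over a block of
16-bit residuals that is 48 coefficients wide and ly rows tall. A minimal scalar
sketch of that computation follows; parameter names mirror the patch, but the
function name and exact signature are illustrative rather than taken from sse.inc:

    #include <stdint.h>

    // Illustrative scalar equivalent of sse_ss48; name and exact signature are
    // a sketch, not copied from sse.inc.
    template<int ly>
    int sse_ss48_scalar(const short* fenc, intptr_t strideFenc,
                        const short* fref, intptr_t strideFref)
    {
        int sum = 0;
        for (int y = 0; y < ly; y++)
        {
            for (int x = 0; x < 48; x++)
            {
                int diff = fenc[x] - fref[x];  // widen to 32 bits before squaring
                sum += diff * diff;
            }
            fenc += strideFenc;
            fref += strideFref;
        }
        return sum;
    }

Both the removed vector class code and the new intrinsic code below evaluate
exactly this, 8 coefficients at a time (six groups of 8 per row).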
diff -r cb9f29e64e64 -r 933485037b32 source/common/vec/sse.inc
--- a/source/common/vec/sse.inc Wed Oct 09 17:25:23 2013 +0530
+++ b/source/common/vec/sse.inc Wed Oct 09 17:29:17 2013 +0530
@@ -307,70 +307,75 @@
{
int rows = ly;
- Vec4i diff(0);
- Vec8s m1, n1;
- Vec4i sum(0);
+ __m128i sum = _mm_setzero_si128();
+ __m128i m1, n1, diff, sign1, sign2, tmp1, tmp2;
+
+#define PROCESS_SSE_SS48x1(BASE)\
+ m1 = _mm_loadu_si128((__m128i const*)(fenc + BASE)); \
+ n1 = _mm_loadu_si128((__m128i const*)(fref + BASE)); \
+ sign1 = _mm_srai_epi16(m1, 15); \
+ tmp1 = _mm_unpacklo_epi16(m1, sign1); \
+ sign2 = _mm_srai_epi16(n1, 15); \
+ tmp2 = _mm_unpacklo_epi16(n1, sign2); \
+ diff = _mm_sub_epi32(tmp1, tmp2); \
+ diff = _mm_mullo_epi32(diff, diff); \
+ sum = _mm_add_epi32(sum, diff)
+
for (; rows != 0; rows--)
{
- m1.load(fenc);
- n1.load(fref);
- diff = extend_low(m1) - extend_low(n1);
- diff = diff * diff;
- sum += diff;
- diff = extend_high(m1) - extend_high(n1);
- diff = diff * diff;
- sum += diff;
+ PROCESS_SSE_SS48x1(0);
- m1.load(fenc + 8);
- n1.load(fref + 8);
- diff = extend_low(m1) - extend_low(n1);
- diff = diff * diff;
- sum += diff;
- diff = extend_high(m1) - extend_high(n1);
- diff = diff * diff;
- sum += diff;
+ m1 = _mm_unpackhi_epi16(m1, sign1);
+ n1 = _mm_unpackhi_epi16(n1, sign2);
+ diff = _mm_sub_epi32(m1, n1);
+ diff = _mm_mullo_epi32(diff, diff);
+ sum = _mm_add_epi32(sum, diff);
- m1.load(fenc + 16);
- n1.load(fref + 16);
- diff = extend_low(m1) - extend_low(n1);
- diff = diff * diff;
- sum += diff;
- diff = extend_high(m1) - extend_high(n1);
- diff = diff * diff;
- sum += diff;
+ PROCESS_SSE_SS48x1(8);
- m1.load(fenc + 24);
- n1.load(fref + 24);
- diff = extend_low(m1) - extend_low(n1);
- diff = diff * diff;
- sum += diff;
- diff = extend_high(m1) - extend_high(n1);
- diff = diff * diff;
- sum += diff;
+ m1 = _mm_unpackhi_epi16(m1, sign1);
+ n1 = _mm_unpackhi_epi16(n1, sign2);
+ diff = _mm_sub_epi32(m1, n1);
+ diff = _mm_mullo_epi32(diff, diff);
+ sum = _mm_add_epi32(sum, diff);
- m1.load(fenc + 32);
- n1.load(fref + 32);
- diff = extend_low(m1) - extend_low(n1);
- diff = diff * diff;
- sum += diff;
- diff = extend_high(m1) - extend_high(n1);
- diff = diff * diff;
- sum += diff;
+ PROCESS_SSE_SS48x1(16);
- m1.load(fenc + 40);
- n1.load(fref + 40);
- diff = extend_low(m1) - extend_low(n1);
- diff = diff * diff;
- sum += diff;
- diff = extend_high(m1) - extend_high(n1);
- diff = diff * diff;
- sum += diff;
+ m1 = _mm_unpackhi_epi16(m1, sign1);
+ n1 = _mm_unpackhi_epi16(n1, sign2);
+ diff = _mm_sub_epi32(m1, n1);
+ diff = _mm_mullo_epi32(diff, diff);
+ sum = _mm_add_epi32(sum, diff);
+
+ PROCESS_SSE_SS48x1(24);
+
+ m1 = _mm_unpackhi_epi16(m1, sign1);
+ n1 = _mm_unpackhi_epi16(n1, sign2);
+ diff = _mm_sub_epi32(m1, n1);
+ diff = _mm_mullo_epi32(diff, diff);
+ sum = _mm_add_epi32(sum, diff);
+
+ PROCESS_SSE_SS48x1(32);
+
+ m1 = _mm_unpackhi_epi16(m1, sign1);
+ n1 = _mm_unpackhi_epi16(n1, sign2);
+ diff = _mm_sub_epi32(m1, n1);
+ diff = _mm_mullo_epi32(diff, diff);
+ sum = _mm_add_epi32(sum, diff);
+
+ PROCESS_SSE_SS48x1(40);
+
+ m1 = _mm_unpackhi_epi16(m1, sign1);
+ n1 = _mm_unpackhi_epi16(n1, sign2);
+ diff = _mm_sub_epi32(m1, n1);
+ diff = _mm_mullo_epi32(diff, diff);
+ sum = _mm_add_epi32(sum, diff);
fenc += strideFenc;
fref += strideFref;
}
-
- return horizontal_add(sum);
+ __m128i sum1 = _mm_hadd_epi32(sum, sum); // horizontally add 4 elements
+ return _mm_cvtsi128_si32(_mm_hadd_epi32(sum1, sum1));
}
template<int ly>
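
A note on the replacement pattern itself: each group of 8 int16 coefficients is
sign-extended to 32 bits with an arithmetic shift plus unpack (SSE2), differenced,
squared with _mm_mullo_epi32 (SSE4.1), and accumulated; the four 32-bit partial
sums are then collapsed with two _mm_hadd_epi32 (SSSE3). Below is a standalone
sketch of that pattern for a single group of 8, with a hypothetical function name,
assuming an SSE4.1-capable build:

    #include <smmintrin.h>  // SSE4.1 (_mm_mullo_epi32); also provides SSSE3 _mm_hadd_epi32

    // Sum of squared differences of 8 int16 values -- the same pattern as
    // PROCESS_SSE_SS48x1 plus its unpackhi tail in the patch above.
    static int sse_8x_int16(const short* a, const short* b)
    {
        __m128i m1 = _mm_loadu_si128((__m128i const*)a);
        __m128i n1 = _mm_loadu_si128((__m128i const*)b);

        // Arithmetic shift by 15 replicates each lane's sign bit; interleaving
        // the original words with it sign-extends every 16-bit lane to 32 bits.
        __m128i sign1 = _mm_srai_epi16(m1, 15);
        __m128i sign2 = _mm_srai_epi16(n1, 15);

        __m128i lo = _mm_sub_epi32(_mm_unpacklo_epi16(m1, sign1),
                                   _mm_unpacklo_epi16(n1, sign2));
        __m128i hi = _mm_sub_epi32(_mm_unpackhi_epi16(m1, sign1),
                                   _mm_unpackhi_epi16(n1, sign2));

        __m128i sum = _mm_add_epi32(_mm_mullo_epi32(lo, lo),  // SSE4.1
                                    _mm_mullo_epi32(hi, hi));

        // Two horizontal adds reduce the four 32-bit partial sums to one scalar.
        sum = _mm_hadd_epi32(sum, sum);
        sum = _mm_hadd_epi32(sum, sum);
        return _mm_cvtsi128_si32(sum);
    }

On SSE4.1 targets _mm_cvtepi16_epi32 could replace the shift/unpack pair for the
low half, but the shift/unpack form also yields the high half for free via
_mm_unpackhi_epi16, which is what the patch exploits.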