[x265] [PATCH] pixel: replace sse_sp12 vector class with intrinsics

yuvaraj at multicorewareinc.com
Wed Oct 9 15:33:00 CEST 2013


# HG changeset patch
# User Yuvaraj Venkatesh <yuvaraj at multicorewareinc.com>
# Date 1381325522 -19800
#      Wed Oct 09 19:02:02 2013 +0530
# Node ID e3e711e0f070579f7d97ee02a3283a3a4d0cb6a2
# Parent  ae06d8100683c574fc5d0b995ac308d24e89908e
pixel: replace sse_sp12 vector class with intrinsics

diff -r ae06d8100683 -r e3e711e0f070 source/common/vec/sse.inc
--- a/source/common/vec/sse.inc	Wed Oct 09 18:26:17 2013 +0530
+++ b/source/common/vec/sse.inc	Wed Oct 09 19:02:02 2013 +0530
@@ -484,31 +484,50 @@
 template<int ly>
 int sse_sp12(short* fenc, intptr_t strideFenc, pixel* fref, intptr_t strideFref)
 {
-    int rows = ly;
-    Vec8s m1;
-    Vec16uc n1;
+    __m128i sum0 = _mm_setzero_si128();
 
-    Vec8us diff_low(0);
-    Vec4i diff_high(0);
-    Vec4i sum_low(0), sum_high(0);
-    for (; rows != 0; rows--)
+    for (int i = 0; i < ly; i++)
     {
-        n1.load(fref);
-        n1.cutoff(12);
-        m1.load(fenc);
-        diff_low = m1 - extend_low(n1);
-        m1.load(fenc + 8);
-        diff_high = extend_low(m1) - extend_low(extend_high(n1));
-        diff_low = diff_low * diff_low;
-        diff_high = diff_high * diff_high;
-        sum_low += extend_low(diff_low);
-        sum_high += (extend_high(diff_low) + diff_high);
+        __m128i T00, T01;
+        __m128i T10, T11, T12, T13;
+        T00 = _mm_loadu_si128((__m128i*)(fenc));
+        T01 = _mm_loadu_si128((__m128i*)(fref));
+        T01 = _mm_srli_si128(_mm_slli_si128(T01, 4), 4);    // zero the last 4 ref bytes; the block is 12 pixels wide
+
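+        // cols 0-3: widen enc shorts and ref bytes to 32-bit, diff, square, accumulate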
+        T10 = _mm_unpacklo_epi16(T00, _mm_setzero_si128());
+        T11 = _mm_unpacklo_epi8(T01, _mm_setzero_si128());
+        T11 = _mm_unpacklo_epi16(T11, _mm_setzero_si128());
+        T12 = _mm_sub_epi32(T10, T11);
+        T13 = _mm_mullo_epi32(T12, T12);
+        sum0 = _mm_add_epi32(sum0, T13);
+
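+        // cols 4-7: same, using the high halves of the widened vectors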
+        T10 = _mm_unpackhi_epi16(T00, _mm_setzero_si128());
+        T11 = _mm_unpacklo_epi8(T01, _mm_setzero_si128());
+        T11 = _mm_unpackhi_epi16(T11, _mm_setzero_si128());
+        T12 = _mm_sub_epi32(T10, T11);
+        T13 = _mm_mullo_epi32(T12, T12);
+        sum0 = _mm_add_epi32(sum0, T13);
+
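+        // cols 8-11: reload enc from fenc + 8 and widen the high eight ref bytes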
+        T00 = _mm_loadu_si128((__m128i*)(fenc + 8));
+
+        T10 = _mm_unpacklo_epi16(T00, _mm_setzero_si128());
+        T11 = _mm_unpackhi_epi8(T01, _mm_setzero_si128());
+        T11 = _mm_unpacklo_epi16(T11, _mm_setzero_si128());
+        T12 = _mm_sub_epi32(T10, T11);
+        T13 = _mm_mullo_epi32(T12, T12);
+        sum0 = _mm_add_epi32(sum0, T13);
 
         fenc += strideFenc;
         fref += strideFref;
     }
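+    // fold the four 32-bit partial sums in sum0 into a single scalar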
+    sum0 = _mm_hadd_epi32(sum0, _mm_setzero_si128());
+    sum0 = _mm_hadd_epi32(sum0, _mm_setzero_si128());
 
-    return horizontal_add(sum_low) + horizontal_add(sum_high);
+    return _mm_cvtsi128_si32(sum0);
 }
 
 template<int ly>


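For reference, here is a scalar sketch of the computation the intrinsic version performs, as I read it (the name sse_sp12_ref is hypothetical, not part of the patch): the block is 12 columns wide, and the enc shorts are zero-extended to 32-bit, exactly as the unpack-with-zero steps above do.

template<int ly>
int sse_sp12_ref(short* fenc, intptr_t strideFenc, pixel* fref, intptr_t strideFref)
{
    int sum = 0;

    for (int i = 0; i < ly; i++)
    {
        for (int j = 0; j < 12; j++)
        {
            // zero-extend the short, matching the unpack-with-zero widening
            int diff = (unsigned short)fenc[j] - fref[j];
            sum += diff * diff;
        }

        fenc += strideFenc;
        fref += strideFref;
    }

    return sum;
}

Note that the intrinsic loop still reads a full 16 bytes from fref and from fenc + 8 on every row, a few elements past column 11, although only columns 0..11 reach the accumulator.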