<div dir="ltr"><br><div class="gmail_extra"><br><br><div class="gmail_quote">On Tue, Oct 8, 2013 at 4:20 AM,  <span dir="ltr"><<a href="mailto:yuvaraj@multicorewareinc.com" target="_blank">yuvaraj@multicorewareinc.com</a>></span> wrote:<br>
<blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex"># HG changeset patch<br>
# User Yuvaraj Venkatesh <<a href="mailto:yuvaraj@multicorewareinc.com">yuvaraj@multicorewareinc.com</a>><br>
# Date 1381223945 -19800<br>
#      Tue Oct 08 14:49:05 2013 +0530<br>
# Node ID 23f4e0a507a6be19fceb4a2525aeb2a5fae5e1ab<br>
# Parent  1a62566488b7ece9bbfb665e37ac402a08ce156e<br>
pixel: replace getResidual64 from vector class to intrinsic<br>
<br>
diff -r 1a62566488b7 -r 23f4e0a507a6 source/common/vec/pixel8.inc<br>
--- a/source/common/vec/pixel8.inc      Tue Oct 08 14:33:26 2013 +0530<br>
+++ b/source/common/vec/pixel8.inc      Tue Oct 08 14:49:05 2013 +0530<br>
@@ -112,46 +112,41 @@<br>
>      }
>  }
>
> -void getResidual64(pixel *fenc, pixel *pred, short *resi, int stride)
> -{
> -    Vec16uc f, p;
> -    Vec8s r;
> +void getResidual64(pixel *fenc, pixel *pred, short *resi, int stride)
> +{
> +    __m128i T00, T01, T02, T03, T04;
> +
> +#define RESIDUAL_64x4(BASE, OFFSET) \
> +    T00 = _mm_load_si128((__m128i*)(fenc + OFFSET + (BASE + 0) * stride)); \
> +    T01 = _mm_load_si128((__m128i*)(pred + OFFSET + (BASE + 0) * stride)); \
> +    T02 = _mm_unpacklo_epi8(T00, _mm_setzero_si128()); \
> +    T03 = _mm_unpacklo_epi8(T01, _mm_setzero_si128()); \
> +    T04 = _mm_sub_epi16(T02, T03); \
> +    _mm_store_si128((__m128i*)(resi + OFFSET + (BASE + 0) * stride), T04); \
> +    T02 = _mm_unpackhi_epi8(T00, _mm_setzero_si128()); \
> +    T03 = _mm_unpackhi_epi8(T01, _mm_setzero_si128()); \
> +    T04 = _mm_sub_epi16(T02, T03); \
> +    _mm_store_si128((__m128i*)(resi + 8 + OFFSET + (BASE + 0) * stride), T04); \
> +    T00 = _mm_load_si128((__m128i*)(fenc + OFFSET + (BASE + 1) * stride)); \
> +    T01 = _mm_load_si128((__m128i*)(pred + OFFSET + (BASE + 1) * stride)); \
> +    T02 = _mm_unpacklo_epi8(T00, _mm_setzero_si128()); \
> +    T03 = _mm_unpacklo_epi8(T01, _mm_setzero_si128()); \
> +    T04 = _mm_sub_epi16(T02, T03); \
> +    _mm_store_si128((__m128i*)(resi + OFFSET + (BASE + 1) * stride), T04); \
> +    T02 = _mm_unpackhi_epi8(T00, _mm_setzero_si128()); \
> +    T03 = _mm_unpackhi_epi8(T01, _mm_setzero_si128()); \
> +    T04 = _mm_sub_epi16(T02, T03); \
> +    _mm_store_si128((__m128i*)(resi + 8 + OFFSET + (BASE + 1) * stride), T04)
> +
> +    for (int i = 0; i < 64; i += 2)
> +    {
> +        RESIDUAL_64x4(i, 0);
> +        RESIDUAL_64x4(i, 16);
> +        RESIDUAL_64x4(i, 32);
> +        RESIDUAL_64x4(i, 48);

And this one can use the same RESIDUAL_2x16 macro as the 32x32 primitive.
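Roughly what I have in mind, as a sketch only (I am assuming the 32x32 primitive's RESIDUAL_2x16(BASE, OFFSET) covers two rows of 16 pixels at the given column offset and uses the same T00..T04 temporaries; that signature is my guess, not checked against pixel8.inc):

void getResidual64(pixel *fenc, pixel *pred, short *resi, int stride)
{
    // temporaries the shared macro is assumed to reference
    __m128i T00, T01, T02, T03, T04;

    // reuse the existing 2x16 residual macro four times per pair of rows
    // instead of defining a new RESIDUAL_64x4 macro with a duplicated body
    for (int i = 0; i < 64; i += 2)
    {
        RESIDUAL_2x16(i, 0);
        RESIDUAL_2x16(i, 16);
        RESIDUAL_2x16(i, 32);
        RESIDUAL_2x16(i, 48);
    }
}

That would keep the 32x32 and 64x64 paths on a single helper.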

> +    }
> +}
>
> -    for (int y = 0; y < 64; y++)
> -    {
> -        f.load_a(fenc);
> -        p.load_a(pred);
> -        r = extend_low(f) - extend_low(p);
> -        r.store(resi);
> -        r = extend_high(f) - extend_high(p);
> -        r.store(resi + 8);
> -
> -        f.load_a(fenc + 16);
> -        p.load_a(pred + 16);
> -        r = extend_low(f) - extend_low(p);
> -        r.store(resi + 16);
> -        r = extend_high(f) - extend_high(p);
> -        r.store(resi + 24);
> -
> -        f.load_a(fenc + 32);
> -        p.load_a(pred + 32);
> -        r = extend_low(f) - extend_low(p);
> -        r.store(resi + 32);
> -        r = extend_high(f) - extend_high(p);
> -        r.store(resi + 40);
> -
> -        f.load_a(fenc + 48);
> -        p.load_a(pred + 48);
> -        r = extend_low(f) - extend_low(p);
> -        r.store(resi + 48);
> -        r = extend_high(f) - extend_high(p);
> -        r.store(resi + 56);
> -
> -        fenc += stride;
> -        pred += stride;
> -        resi += stride;
> -    }
> -}
>
>  void calcRecons4(pixel* pPred, short* pResi, pixel* pReco, short* pRecQt, pixel* pRecIPred, int stride, int recstride, int ipredstride)
>  {
> _______________________________________________
> x265-devel mailing list
> x265-devel@videolan.org
> https://mailman.videolan.org/listinfo/x265-devel
-- 
Steve Borho