<div dir="ltr">Divya will send a corrected patch for this, the intermediate values can stay as 32-bit. <br></div><div class="gmail_extra"><br><div class="gmail_quote">On Thu, Jan 8, 2015 at 2:01 PM, Divya Manivannan <span dir="ltr"><<a href="mailto:divya@multicorewareinc.com" target="_blank">divya@multicorewareinc.com</a>></span> wrote:<br><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex"># HG changeset patch<br>
On Thu, Jan 8, 2015 at 2:01 PM, Divya Manivannan <divya@multicorewareinc.com> wrote:

# HG changeset patch
# User Divya Manivannan <divya@multicorewareinc.com>
# Date 1420705817 -19800
#      Thu Jan 08 14:00:17 2015 +0530
# Node ID 188e42417b37cc5ab473f8ba51a351f4fd663082
# Parent 6dce2b87f0fe4aa37f9c7d66ec99447919b19c64
fix bug in satd_4x4 for psyCost_ss

diff -r 6dce2b87f0fe -r 188e42417b37 source/common/pixel.cpp
--- a/source/common/pixel.cpp	Thu Jan 08 10:29:09 2015 +0530
+++ b/source/common/pixel.cpp	Thu Jan 08 14:00:17 2015 +0530
@@ -241,32 +241,35 @@
     return (int)(sum >> 1);
 }

-int satd_4x4(const int16_t* pix1, intptr_t stride_pix1, const int16_t* pix2, intptr_t stride_pix2)
+static int satd_4x4(const int16_t* pix1, intptr_t stride_pix1)
 {
-    int64_t tmp[4][2];
-    int64_t a0, a1, a2, a3, b0, b1;
-    int64_t sum = 0;
-
-    for (int i = 0; i < 4; i++, pix1 += stride_pix1, pix2 += stride_pix2)
-    {
-        a0 = pix1[0] - pix2[0];
-        a1 = pix1[1] - pix2[1];
-        b0 = (a0 + a1) + ((a0 - a1) << BITS_PER_SUM);
-        a2 = pix1[2] - pix2[2];
-        a3 = pix1[3] - pix2[3];
-        b1 = (a2 + a3) + ((a2 - a3) << BITS_PER_SUM);
-        tmp[i][0] = b0 + b1;
-        tmp[i][1] = b0 - b1;
-    }
-
-    for (int i = 0; i < 2; i++)
-    {
-        HADAMARD4(a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i]);
-        a0 = abs2(a0) + abs2(a1) + abs2(a2) + abs2(a3);
-        sum += ((sum_t)a0) + (a0 >> BITS_PER_SUM);
-    }
-
-    return (int)(sum >> 1);
+    int64_t tmp[4][4];
+    int64_t s01, s23, d01, d23;
+    int64_t satd = 0;
+    int d;
+
+    for (d = 0; d < 4; d++, pix1 += stride_pix1)
+    {
+        s01 = pix1[0] + pix1[1];
+        s23 = pix1[2] + pix1[3];
+        d01 = pix1[0] - pix1[1];
+        d23 = pix1[2] - pix1[3];
+
+        tmp[d][0] = s01 + s23;
+        tmp[d][1] = s01 - s23;
+        tmp[d][2] = d01 - d23;
+        tmp[d][3] = d01 + d23;
+    }
+
+    for (d = 0; d < 4; d++)
+    {
+        s01 = tmp[0][d] + tmp[1][d];
+        s23 = tmp[2][d] + tmp[3][d];
+        d01 = tmp[0][d] - tmp[1][d];
+        d23 = tmp[2][d] - tmp[3][d];
+        satd += abs(s01 + s23) + abs(s01 - s23) + abs(d01 - d23) + abs(d01 + d23);
+    }
+    return (int)(satd / 2);
 }

 // x264's SWAR version of satd 8x4, performs two 4x4 SATDs at once
@@ -832,8 +835,8 @@
     else
     {
         /* 4x4 is too small for sa8d */
-        int sourceEnergy = satd_4x4(source, sstride, zeroBuf, 0) - (sad<4, 4>(source, sstride, zeroBuf, 0) >> 2);
-        int reconEnergy = satd_4x4(recon, rstride, zeroBuf, 0) - (sad<4, 4>(recon, rstride, zeroBuf, 0) >> 2);
+        int sourceEnergy = satd_4x4(source, sstride) - (sad<4, 4>(source, sstride, zeroBuf, 0) >> 2);
+        int reconEnergy = satd_4x4(recon, rstride) - (sad<4, 4>(recon, rstride, zeroBuf, 0) >> 2);
         return abs(sourceEnergy - reconEnergy);
     }
 }
_______________________________________________
x265-devel mailing list
x265-devel@videolan.org
https://mailman.videolan.org/listinfo/x265-devel