[x265] [PATCH] psyCost_ss: optimize psyCost_ss_8x8, suitable for ASM conversion
dnyaneshwar at multicorewareinc.com
Fri Dec 12 12:34:50 CET 2014
# HG changeset patch
# User Dnyaneshwar G <dnyaneshwar at multicorewareinc.com>
# Date 1418384066 -19800
# Fri Dec 12 17:04:26 2014 +0530
# Node ID e9434e5fcc08c38741961b37277090ba2430719f
# Parent fb2bf6fe6499af2a471b74520692fe60c00a9423
psyCost_ss: optimize psyCost_ss_8x8, suitable for ASM conversion
Combined sa8d_8x8 and sad_8x8 into a single pass to save redundant loads, and removed the now-unnecessary zeroBuf.
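[Editor's note, not part of the patch:] The combined loop below leans on the same packed-sum arithmetic as the existing sa8d code: two 16-bit Hadamard lanes are carried in one 32-bit accumulator via (a0 + a1) + ((a0 - a1) << BITS_PER_SUM), absolute values are taken on both lanes at once, and the lanes are folded back together at the end. The following standalone sketch illustrates why that works while each lane stays within 15 bits of magnitude; the typedefs and the abs2-style helper are simplified assumptions for an 8-bit build, not the exact pixel.cpp definitions.

    #include <cassert>
    #include <cstdint>
    #include <cstdlib>

    // Simplified stand-ins for the pixel.cpp types (assumption: 8-bit build,
    // where sum_t is 16 bits wide and BITS_PER_SUM is 16).
    typedef uint16_t sum_t;
    typedef uint32_t sum2_t;
    static const int BITS_PER_SUM = 8 * sizeof(sum_t);

    // Pack two signed lane values X and Y into one 32-bit word as X + (Y << 16).
    // Linear operations (the Hadamard butterflies) then act on both lanes at once.
    static sum2_t pack(int x, int y)
    {
        return (sum2_t)x + ((sum2_t)y << BITS_PER_SUM);
    }

    // abs2-style helper: per-lane absolute value, valid while |X|, |Y| < 2^15.
    static sum2_t abs2_lanes(sum2_t a)
    {
        sum2_t s = ((a >> (BITS_PER_SUM - 1)) & (((sum2_t)1 << BITS_PER_SUM) + 1)) * ((sum_t)-1);
        return (a + s) ^ s;
    }

    int main()
    {
        // Emulate one packed butterfly and check it against plain scalar math.
        int a0 = -37, a1 = 91;
        sum2_t packed = pack(a0 + a1, a0 - a1); // same shape as (a0+a1) + ((a0-a1) << BITS_PER_SUM)
        sum2_t absval = abs2_lanes(packed);     // |a0+a1| in the low lane, |a0-a1| in the high lane

        // Folding the lanes gives |a0+a1| + |a0-a1|,
        // as in sum += (sum_t)b0 + (b0 >> BITS_PER_SUM) below.
        int folded = (int)(sum_t)absval + (int)(absval >> BITS_PER_SUM);
        assert(folded == std::abs(a0 + a1) + std::abs(a0 - a1));
        return 0;
    }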
diff -r fb2bf6fe6499 -r e9434e5fcc08 source/common/pixel.cpp
--- a/source/common/pixel.cpp Fri Dec 12 16:39:44 2014 +0530
+++ b/source/common/pixel.cpp Fri Dec 12 17:04:26 2014 +0530
@@ -427,6 +427,60 @@
}
}
+void psy_acEnergy_ss_8x8(const int16_t* src, intptr_t stride, int* energy, int dim)
+{
+ int n = 0;
+ const int16_t* tmpSrc = src;
+
+ for (int k = 0; k < dim; k += 8)
+ {
+ for (int j = 0; j < dim; j += 8)
+ {
+ src = tmpSrc + k * stride + j;
+ ssum2_t tmp[8][4];
+ ssum2_t a0, a1, a2, a3, a4, a5, a6, a7, b0, b1, b2, b3;
+ ssum2_t sum = 0, sum1 = 0;
+
+ for (int i = 0; i < 8; i++, src += stride)
+ {
+ a0 = src[0];
+ a1 = src[1];
+ sum1 += abs(a0) + abs(a1);
+ b0 = (a0 + a1) + ((a0 - a1) << BITS_PER_SUM);
+ a2 = src[2];
+ a3 = src[3];
+ sum1 += abs(a2) + abs(a3);
+ b1 = (a2 + a3) + ((a2 - a3) << BITS_PER_SUM);
+ a4 = src[4];
+ a5 = src[5];
+ sum1 += abs(a4) + abs(a5);
+ b2 = (a4 + a5) + ((a4 - a5) << BITS_PER_SUM);
+ a6 = src[6];
+ a7 = src[7];
+ sum1 += abs(a6) + abs(a7);
+ b3 = (a6 + a7) + ((a6 - a7) << BITS_PER_SUM);
+ HADAMARD4(tmp[i][0], tmp[i][1], tmp[i][2], tmp[i][3], b0, b1, b2, b3);
+ }
+
+ for (int i = 0; i < 4; i++)
+ {
+ HADAMARD4(a0, a1, a2, a3, tmp[0][i], tmp[1][i], tmp[2][i], tmp[3][i]);
+ HADAMARD4(a4, a5, a6, a7, tmp[4][i], tmp[5][i], tmp[6][i], tmp[7][i]);
+ b0 = abs2(a0 + a4) + abs2(a0 - a4);
+ b0 += abs2(a1 + a5) + abs2(a1 - a5);
+ b0 += abs2(a2 + a6) + abs2(a2 - a6);
+ b0 += abs2(a3 + a7) + abs2(a3 - a7);
+ sum += (sum_t)b0 + (b0 >> BITS_PER_SUM);
+ }
+
+ sum = (int)((sum + 2) >> 2);
+ sum1 >>= 2;
+
+ energy[n++] = (sum - sum1);
+ }
+ }
+}
+
inline int _sa8d_8x8(const int16_t* pix1, intptr_t i_pix1, const int16_t* pix2, intptr_t i_pix2)
{
ssum2_t tmp[8][4];
@@ -947,25 +1001,24 @@
template<int size>
int psyCost_ss(const int16_t* source, intptr_t sstride, const int16_t* recon, intptr_t rstride)
{
- static int16_t zeroBuf[8] /* = { 0 } */;
-
if (size)
{
- int dim = 1 << (size + 2);
+ int dim = 1 << (size + 2);
+ int bufSize = dim >> (4 - size);
uint32_t totEnergy = 0;
- for (int i = 0; i < dim; i += 8)
- {
- for (int j = 0; j < dim; j+= 8)
- {
- /* AC energy, measured by sa8d (AC + DC) minus SAD (DC) */
- int sourceEnergy = sa8d_8x8(source + i * sstride + j, sstride, zeroBuf, 0) -
- (sad<8, 8>(source + i * sstride + j, sstride, zeroBuf, 0) >> 2);
- int reconEnergy = sa8d_8x8(recon + i * rstride + j, rstride, zeroBuf, 0) -
- (sad<8, 8>(recon + i * rstride + j, rstride, zeroBuf, 0) >> 2);
+ int* sourceEnergy = X265_MALLOC(int, bufSize);
+ int* reconEnergy = X265_MALLOC(int, bufSize);
- totEnergy += abs(sourceEnergy - reconEnergy);
- }
- }
+ /* AC energy, measured by sa8d (AC + DC) minus SAD (DC) */
+ psy_acEnergy_ss_8x8(source, sstride, sourceEnergy, dim);
+ psy_acEnergy_ss_8x8(recon, rstride, reconEnergy, dim);
+
+ for (int i = 0; i < bufSize; i++)
+ totEnergy += abs(sourceEnergy[i] - reconEnergy[i]);
+
+ X265_FREE(sourceEnergy);
+ X265_FREE(reconEnergy);
+
return totEnergy;
}
else
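[Editor's note, not part of the patch:] On the indexing above, dim = 1 << (size + 2) and bufSize = dim >> (4 - size), so bufSize is exactly the number of 8x8 blocks in the dim x dim area, i.e. (dim / 8) * (dim / 8). A quick standalone check of that identity, assuming the blocked path is taken for size values 1 through 4 (dim of 8 through 64 by the formula, with size 0 handled by the 4x4 branch), might look like this; it is illustrative only.

    #include <cassert>

    int main()
    {
        // For each template size handled by the blocked path, verify that
        // bufSize = dim >> (4 - size) equals the 8x8 block count (dim/8)^2.
        for (int size = 1; size <= 4; size++)
        {
            int dim = 1 << (size + 2);       // 8, 16, 32, 64
            int bufSize = dim >> (4 - size); // as computed in psyCost_ss
            assert(bufSize == (dim / 8) * (dim / 8));
        }
        return 0;
    }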