[x265] [PATCH] ipfilter*.inc: Clean up function names and variables
gopu at multicorewareinc.com
Tue Jul 9 08:59:18 CEST 2013
# HG changeset patch
# User ggopu
# Date 1373353143 -19800
# Node ID 8b7b62ccda5d358ccb612f9016c3139e47ac03e4
# Parent d5d5a3f73738b330b7c3345f8da9ea1ec3b1fd6e
ipfilter*.inc: Clean up function names and variables
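For the record, the renames applied below are:

    filterHorizontal_pel_pel   -> filterHorizontal_p_p
    filterHorizontal_pel_short -> filterHorizontal_p_s
    filterVertical_short_pel   -> filterVertical_s_p
    filterVertical_pel_pel     -> filterVertical_p_p

so the suffix letters name the argument types directly (p = pel, s = short). The local alias of srcStride, previously cstride, is renamed to stride throughout.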
diff -r d5d5a3f73738 -r 8b7b62ccda5d source/common/vec/ipfilter.inc
--- a/source/common/vec/ipfilter.inc Tue Jul 09 11:49:47 2013 +0530
+++ b/source/common/vec/ipfilter.inc Tue Jul 09 12:29:03 2013 +0530
@@ -42,15 +42,15 @@
namespace x265 {
void NAME(Setup_Vec_IPFilterPrimitives)(EncoderPrimitives& p)
{
- p.ipfilter_pp[FILTER_H_P_P_4] = filterHorizontal_pel_pel<4>;
- p.ipfilter_pp[FILTER_H_P_P_8] = filterHorizontal_pel_pel<8>;
- p.ipfilter_ps[FILTER_H_P_S_4] = filterHorizontal_pel_short<4>;
- p.ipfilter_ps[FILTER_H_P_S_8] = filterHorizontal_pel_short<8>;
+ p.ipfilter_pp[FILTER_H_P_P_4] = filterHorizontal_p_p<4>;
+ p.ipfilter_pp[FILTER_H_P_P_8] = filterHorizontal_p_p<8>;
+ p.ipfilter_ps[FILTER_H_P_S_4] = filterHorizontal_p_s<4>;
+ p.ipfilter_ps[FILTER_H_P_S_8] = filterHorizontal_p_s<8>;
- p.ipfilter_sp[FILTER_V_S_P_8] = filterVertical_short_pel<8>;
- p.ipfilter_sp[FILTER_V_S_P_4] = filterVertical_short_pel<4>;
- p.ipfilter_pp[FILTER_V_P_P_8] = filterVertical_pel_pel<8>;
- p.ipfilter_pp[FILTER_V_P_P_4] = filterVertical_pel_pel<4>;
+ p.ipfilter_sp[FILTER_V_S_P_8] = filterVertical_s_p<8>;
+ p.ipfilter_sp[FILTER_V_S_P_4] = filterVertical_s_p<4>;
+ p.ipfilter_pp[FILTER_V_P_P_8] = filterVertical_p_p<8>;
+ p.ipfilter_pp[FILTER_V_P_P_4] = filterVertical_p_p<4>;
p.ipfilter_p2s = filterConvertPelToShort;
p.ipfilter_s2p = filterConvertShortToPel;
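As a reference for reviewers unfamiliar with the primitives table: the entries above are plain function pointers indexed by the FILTER_* enums, so a caller dispatches through the table. A minimal sketch, assuming an 8-bit, 16x16 block and that the x265 headers defining EncoderPrimitives and pixel are in scope (the call site and dimensions are illustrative; only the signature and enum names come from this patch):

    // Illustrative only: dispatch an 8-tap horizontal pel->pel filter
    // through the table populated above. Signature per this patch:
    // (bitDepth, src, srcStride, dst, dstStride, width, height, coeff).
    void runHorizontalLuma(EncoderPrimitives& p, pixel *src, int srcStride,
                           pixel *dst, int dstStride, const short *coeff)
    {
        p.ipfilter_pp[FILTER_H_P_P_8](8, src, srcStride, dst, dstStride,
                                      16, 16, coeff);
    }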
diff -r d5d5a3f73738 -r 8b7b62ccda5d source/common/vec/ipfilter16.inc
--- a/source/common/vec/ipfilter16.inc Tue Jul 09 11:49:47 2013 +0530
+++ b/source/common/vec/ipfilter16.inc Tue Jul 09 12:29:03 2013 +0530
@@ -29,13 +29,13 @@
#define IF_INTERNAL_OFFS (1 << (IF_INTERNAL_PREC - 1)) ///< Offset used internally
template<int N>
-void filterVertical_short_pel(int bitDepth, short *src, int srcStride, pixel *dst, int dstStride, int block_width, int block_height, short const *coeff)
+void filterVertical_s_p(int bitDepth, short *src, int srcStride, pixel *dst, int dstStride, int block_width, int block_height, short const *coeff)
{
int row, col;
- int cstride = srcStride;
+ int stride = srcStride;
- src -= (N / 2 - 1) * cstride;
+ src -= (N / 2 - 1) * stride;
int offset;
short maxVal;
@@ -83,8 +83,8 @@
sum_first = row0_first + row1_first;
sum_last = row0_last + row1_last;
- row2.load(&src[col + 2 * cstride]);
- row3.load(&src[col + 3 * cstride]);
+ row2.load(&src[col + 2 * stride]);
+ row3.load(&src[col + 3 * stride]);
c2.load(cm[2]);
c3.load(cm[3]);
@@ -102,8 +102,8 @@
if (N == 8)
{
- row4.load(&src[col + 4 * cstride]);
- row5.load(&src[col + 5 * cstride]);
+ row4.load(&src[col + 4 * stride]);
+ row5.load(&src[col + 5 * stride]);
c4.load(cm[4]);
c5.load(cm[5]);
@@ -119,8 +119,8 @@
sum_first += row0_first + row1_first;
sum_last += row0_last + row1_last;
- row6.load(&src[col + 6 * cstride]);
- row7.load(&src[col + 7 * cstride]);
+ row6.load(&src[col + 6 * stride]);
+ row7.load(&src[col + 7 * stride]);
c6.load(cm[6]);
c7.load(cm[7]);
@@ -158,7 +158,7 @@
Vec4i c0, c1, c2, c3, c4, c5, c6, c7;
row0.load(&src[col]);
- row1.load(&src[col + cstride]);
+ row1.load(&src[col + stride]);
c0.load(cm[0]);
c1.load(cm[1]);
@@ -170,8 +170,8 @@
sum_first = row0_first + row1_first;
- row2.load(&src[col + 2 * cstride]);
- row3.load(&src[col + 3 * cstride]);
+ row2.load(&src[col + 2 * stride]);
+ row3.load(&src[col + 3 * stride]);
c2.load(cm[2]);
c3.load(cm[3]);
@@ -184,8 +184,8 @@
if (N == 8)
{
- row4.load(&src[col + 4 * cstride]);
- row5.load(&src[col + 5 * cstride]);
+ row4.load(&src[col + 4 * stride]);
+ row5.load(&src[col + 5 * stride]);
c4.load(cm[4]);
c5.load(cm[5]);
@@ -196,8 +196,8 @@
row1_first = row1_first * c5;
sum_first += row0_first + row1_first;
- row6.load(&src[col + 6 * cstride]);
- row7.load(&src[col + 7 * cstride]);
+ row6.load(&src[col + 6 * stride]);
+ row7.load(&src[col + 7 * stride]);
c6.load(cm[6]);
c7.load(cm[7]);
@@ -227,13 +227,13 @@
}
template<int N>
-void filterVertical_pel_pel(int bitDepth, pixel *src, int srcStride, pixel *dst, int dstStride, int block_width, int block_height, short const *coeff)
+void filterVertical_p_p(int bitDepth, pixel *src, int srcStride, pixel *dst, int dstStride, int block_width, int block_height, short const *coeff)
{
int row, col;
- int cstride = srcStride;
+ int stride = srcStride;
- src -= (N / 2 - 1) * cstride;
+ src -= (N / 2 - 1) * stride;
int offset;
short maxVal;
@@ -261,7 +261,7 @@
Vec4i c0, c1, c2, c3, c4, c5, c6, c7;
row0.load(&src[col]);
- row1.load(&src[col + cstride]);
+ row1.load(&src[col + stride]);
c0.load(cm[0]);
c1.load(cm[1]);
@@ -279,8 +279,8 @@
sum_first = row0_first + row1_first;
sum_last = row0_last + row1_last;
- row2.load(&src[col + 2 * cstride]);
- row3.load(&src[col + 3 * cstride]);
+ row2.load(&src[col + 2 * stride]);
+ row3.load(&src[col + 3 * stride]);
c2.load(cm[2]);
c3.load(cm[3]);
@@ -298,8 +298,8 @@
if (N == 8)
{
- row4.load(&src[col + 4 * cstride]);
- row5.load(&src[col + 5 * cstride]);
+ row4.load(&src[col + 4 * stride]);
+ row5.load(&src[col + 5 * stride]);
c4.load(cm[4]);
c5.load(cm[5]);
@@ -315,8 +315,8 @@
sum_first += row0_first + row1_first;
sum_last += row0_last + row1_last;
- row6.load(&src[col + 6 * cstride]);
- row7.load(&src[col + 7 * cstride]);
+ row6.load(&src[col + 6 * stride]);
+ row7.load(&src[col + 7 * stride]);
c6.load(cm[6]);
c7.load(cm[7]);
@@ -354,7 +354,7 @@
Vec4i c0, c1, c2, c3, c4, c5, c6, c7;
row0.load(&src[col]);
- row1.load(&src[col + cstride]);
+ row1.load(&src[col + stride]);
c0.load(cm[0]);
c1.load(cm[1]);
@@ -366,8 +366,8 @@
sum_first = row0_first + row1_first;
- row2.load(&src[col + 2 * cstride]);
- row3.load(&src[col + 3 * cstride]);
+ row2.load(&src[col + 2 * stride]);
+ row3.load(&src[col + 3 * stride]);
c2.load(cm[2]);
c3.load(cm[3]);
@@ -380,8 +380,8 @@
if (N == 8)
{
- row4.load(&src[col + 4 * cstride]);
- row5.load(&src[col + 5 * cstride]);
+ row4.load(&src[col + 4 * stride]);
+ row5.load(&src[col + 5 * stride]);
c4.load(cm[4]);
c5.load(cm[5]);
@@ -392,8 +392,8 @@
row1_first = row1_first * c5;
sum_first += row0_first + row1_first;
- row6.load(&src[col + 6 * cstride]);
- row7.load(&src[col + 7 * cstride]);
+ row6.load(&src[col + 6 * stride]);
+ row7.load(&src[col + 7 * stride]);
c6.load(cm[6]);
c7.load(cm[7]);
@@ -423,7 +423,7 @@
}
template<int N>
-void filterHorizontal_pel_pel(int bitDepth, pixel *src, int srcStride, pixel *dst, int dstStride, int block_width, int block_height, short const *coeff)
+void filterHorizontal_p_p(int bitDepth, pixel *src, int srcStride, pixel *dst, int dstStride, int block_width, int block_height, short const *coeff)
{
int row, col;
@@ -529,7 +529,7 @@
}
template<int N>
-void filterHorizontal_pel_short(int bitDepth, pixel *src, int srcStride, short *dst, int dstStride, int block_width, int block_height, short const *coeff)
+void filterHorizontal_p_s(int bitDepth, pixel *src, int srcStride, short *dst, int dstStride, int block_width, int block_height, short const *coeff)
{
int row, col;
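Before moving to the 8-bit file, a note on the stride arithmetic these hunks keep touching: the vertical filters rewind src by (N/2 - 1) rows so that the N-tap window straddles the output row. A minimal scalar sketch of that structure (offset, shift, and clipping steps omitted; the names are illustrative, not the patch's code):

    // Scalar shape of the N-tap vertical filter (N = 4 or 8). The vector
    // code above does the same indexing: tap k loads src[col + k * stride].
    template<int N>
    void filterVerticalRef(const short *src, int srcStride, short *dst,
                           int dstStride, int width, int height,
                           const short *coeff)
    {
        int stride = srcStride;
        src -= (N / 2 - 1) * stride;   // first tap of the filter window
        for (int row = 0; row < height; row++)
        {
            for (int col = 0; col < width; col++)
            {
                int sum = 0;
                for (int k = 0; k < N; k++)
                    sum += src[col + k * stride] * coeff[k];
                dst[col] = (short)sum; // real code adds offset, shifts, clips
            }
            src += srcStride;
            dst += dstStride;
        }
    }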
diff -r d5d5a3f73738 -r 8b7b62ccda5d source/common/vec/ipfilter8.inc
--- a/source/common/vec/ipfilter8.inc Tue Jul 09 11:49:47 2013 +0530
+++ b/source/common/vec/ipfilter8.inc Tue Jul 09 12:29:03 2013 +0530
@@ -29,12 +29,12 @@
#define IF_INTERNAL_OFFS (1 << (IF_INTERNAL_PREC - 1)) ///< Offset used internally
template<int N>
-void filterVertical_short_pel(int bitDepth, short *src, int srcStride, pixel *dst, int dstStride, int block_width, int block_height, short const *coeff)
+void filterVertical_s_p(int bitDepth, short *src, int srcStride, pixel *dst, int dstStride, int block_width, int block_height, short const *coeff)
{
int row, col;
- int cstride = srcStride;
+ int stride = srcStride;
- src -= (N / 2 - 1) * cstride;
+ src -= (N / 2 - 1) * stride;
int offset;
short maxVal;
int headRoom = IF_INTERNAL_PREC - bitDepth;
@@ -56,7 +56,7 @@
Vec4i c0, c1, c2, c3, c4, c5, c6, c7;
row0.load(&src[col]);
- row1.load(&src[col + cstride]);
+ row1.load(&src[col + stride]);
c0 = cm0;
c1 = cm1;
@@ -74,8 +74,8 @@
sum_first = row0_first + row1_first;
sum_last = row0_last + row1_last;
- row2.load(&src[col + 2 * cstride]);
- row3.load(&src[col + 3 * cstride]);
+ row2.load(&src[col + 2 * stride]);
+ row3.load(&src[col + 3 * stride]);
c2 = cm2;
c3 = cm3;
@@ -93,8 +93,8 @@
if (N == 8)
{
- row4.load(&src[col + 4 * cstride]);
- row5.load(&src[col + 5 * cstride]);
+ row4.load(&src[col + 4 * stride]);
+ row5.load(&src[col + 5 * stride]);
c4 = cm4;
c5 = cm5;
@@ -110,8 +110,8 @@
sum_first += row0_first + row1_first;
sum_last += row0_last + row1_last;
- row6.load(&src[col + 6 * cstride]);
- row7.load(&src[col + 7 * cstride]);
+ row6.load(&src[col + 6 * stride]);
+ row7.load(&src[col + 7 * stride]);
c6 = cm6;
c7 = cm7;
@@ -146,7 +146,7 @@
Vec4i c0, c1, c2, c3, c4, c5, c6, c7;
row0.load(&src[col]);
- row1.load(&src[col + cstride]);
+ row1.load(&src[col + stride]);
c0 = cm0;
c1 = cm1;
@@ -158,8 +158,8 @@
sum_first = row0_first + row1_first;
- row2.load(&src[col + 2 * cstride]);
- row3.load(&src[col + 3 * cstride]);
+ row2.load(&src[col + 2 * stride]);
+ row3.load(&src[col + 3 * stride]);
c2 = cm2;
c3 = cm3;
@@ -171,8 +171,8 @@
sum_first += row0_first + row1_first;
if (N == 8)
{
- row4.load(&src[col + 4 * cstride]);
- row5.load(&src[col + 5 * cstride]);
+ row4.load(&src[col + 4 * stride]);
+ row5.load(&src[col + 5 * stride]);
c4 = cm4;
c5 = cm5;
@@ -183,8 +183,8 @@
row1_first = row1_first * c5;
sum_first += row0_first + row1_first;
- row6.load(&src[col + 6 * cstride]);
- row7.load(&src[col + 7 * cstride]);
+ row6.load(&src[col + 6 * stride]);
+ row7.load(&src[col + 7 * stride]);
c6 = cm6;
c7 = cm7;
@@ -205,7 +205,7 @@
sum_uc.store_partial(block_width - col, dst + col);
}
src += srcStride;
dst += dstStride;
}
}
@@ -218,7 +218,7 @@
#if INSTRSET >= 5
#define PROCESSROW(a0, a1, a2, a3, a4, a5, a6, a7) { \
- tmp = _mm_loadu_si128((__m128i const*)(src + col + (row + 7) * cstride)); \
+ tmp = _mm_loadu_si128((__m128i const*)(src + col + (row + 7) * stride)); \
a7 = _mm_unpacklo_epi16(tmp, _mm_srai_epi16(tmp, 15)); \
exp1 = _mm_sub_epi32(_mm_sub_epi32(_mm_sll_epi32(a1, _mm_cvtsi32_si128(2)), a0), _mm_mullo_epi32(a2, _mm_set1_epi32(10))); \
exp2 = _mm_mullo_epi32(a3, _mm_set1_epi32(40)); \
@@ -258,7 +258,7 @@
}
#else /* if INSTRSET >= 5 */
#define PROCESSROW(a0, a1, a2, a3, a4, a5, a6, a7) { \
- tmp = _mm_loadu_si128((__m128i const*)(src + col + (row + 7) * cstride)); \
+ tmp = _mm_loadu_si128((__m128i const*)(src + col + (row + 7) * stride)); \
a7 = _mm_unpacklo_epi16(tmp, _mm_srai_epi16(tmp, 15)); \
/* calculation
@@ -328,12 +328,12 @@
}
#endif /* if INSTRSET >= 5 */
void filterVerticalMultiplaneExtend(int /*bitDepth*/, short *src, int srcStride, pixel *dstE, pixel *dstI, pixel *dstP, int dstStride, int block_width, int block_height, int marginX, int marginY)
{
int row, col;
- int cstride = srcStride;
+ int stride = srcStride;
- src -= (8 / 2 - 1) * cstride;
+ src -= (8 / 2 - 1) * stride;
int offset;
int headRoom = IF_INTERNAL_PREC - 8;
int shift = IF_FILTER_PREC;
@@ -359,17 +359,17 @@
tmp = _mm_loadu_si128((__m128i const*)(src + col));
a0 = _mm_unpacklo_epi16(tmp, _mm_srai_epi16(tmp, 15));
- tmp = _mm_loadu_si128((__m128i const*)(src + col + cstride));
+ tmp = _mm_loadu_si128((__m128i const*)(src + col + stride));
a1 = _mm_unpacklo_epi16(tmp, _mm_srai_epi16(tmp, 15));
- tmp = _mm_loadu_si128((__m128i const*)(src + col + 2 * cstride));
+ tmp = _mm_loadu_si128((__m128i const*)(src + col + 2 * stride));
a2 = _mm_unpacklo_epi16(tmp, _mm_srai_epi16(tmp, 15));
- tmp = _mm_loadu_si128((__m128i const*)(src + col + 3 * cstride));
+ tmp = _mm_loadu_si128((__m128i const*)(src + col + 3 * stride));
a3 = _mm_unpacklo_epi16(tmp, _mm_srai_epi16(tmp, 15));
- tmp = _mm_loadu_si128((__m128i const*)(src + col + 4 * cstride));
+ tmp = _mm_loadu_si128((__m128i const*)(src + col + 4 * stride));
a4 = _mm_unpacklo_epi16(tmp, _mm_srai_epi16(tmp, 15));
- tmp = _mm_loadu_si128((__m128i const*)(src + col + 5 * cstride));
+ tmp = _mm_loadu_si128((__m128i const*)(src + col + 5 * stride));
a5 = _mm_unpacklo_epi16(tmp, _mm_srai_epi16(tmp, 15));
- tmp = _mm_loadu_si128((__m128i const*)(src + col + 6 * cstride));
+ tmp = _mm_loadu_si128((__m128i const*)(src + col + 6 * stride));
a6 = _mm_unpacklo_epi16(tmp, _mm_srai_epi16(tmp, 15));
for (row = 0; row < block_height; row++)
@@ -390,17 +390,17 @@
{
tmp = _mm_loadu_si128((__m128i const*)(src + col));
a0 = _mm_unpacklo_epi16(tmp, _mm_srai_epi16(tmp, 15));
- tmp = _mm_loadu_si128((__m128i const*)(src + col + cstride));
+ tmp = _mm_loadu_si128((__m128i const*)(src + col + stride));
a1 = _mm_unpacklo_epi16(tmp, _mm_srai_epi16(tmp, 15));
- tmp = _mm_loadu_si128((__m128i const*)(src + col + 2 * cstride));
+ tmp = _mm_loadu_si128((__m128i const*)(src + col + 2 * stride));
a2 = _mm_unpacklo_epi16(tmp, _mm_srai_epi16(tmp, 15));
- tmp = _mm_loadu_si128((__m128i const*)(src + col + 3 * cstride));
+ tmp = _mm_loadu_si128((__m128i const*)(src + col + 3 * stride));
a3 = _mm_unpacklo_epi16(tmp, _mm_srai_epi16(tmp, 15));
- tmp = _mm_loadu_si128((__m128i const*)(src + col + 4 * cstride));
+ tmp = _mm_loadu_si128((__m128i const*)(src + col + 4 * stride));
a4 = _mm_unpacklo_epi16(tmp, _mm_srai_epi16(tmp, 15));
- tmp = _mm_loadu_si128((__m128i const*)(src + col + 5 * cstride));
+ tmp = _mm_loadu_si128((__m128i const*)(src + col + 5 * stride));
a5 = _mm_unpacklo_epi16(tmp, _mm_srai_epi16(tmp, 15));
- tmp = _mm_loadu_si128((__m128i const*)(src + col + 6 * cstride));
+ tmp = _mm_loadu_si128((__m128i const*)(src + col + 6 * stride));
a6 = _mm_unpacklo_epi16(tmp, _mm_srai_epi16(tmp, 15));
for (row = 0; row < block_height; row++)
@@ -418,17 +418,17 @@
tmp = _mm_loadu_si128((__m128i const*)(src + col));
a0 = _mm_unpacklo_epi16(tmp, _mm_srai_epi16(tmp, 15));
- tmp = _mm_loadu_si128((__m128i const*)(src + col + cstride));
+ tmp = _mm_loadu_si128((__m128i const*)(src + col + stride));
a1 = _mm_unpacklo_epi16(tmp, _mm_srai_epi16(tmp, 15));
- tmp = _mm_loadu_si128((__m128i const*)(src + col + 2 * cstride));
+ tmp = _mm_loadu_si128((__m128i const*)(src + col + 2 * stride));
a2 = _mm_unpacklo_epi16(tmp, _mm_srai_epi16(tmp, 15));
- tmp = _mm_loadu_si128((__m128i const*)(src + col + 3 * cstride));
+ tmp = _mm_loadu_si128((__m128i const*)(src + col + 3 * stride));
a3 = _mm_unpacklo_epi16(tmp, _mm_srai_epi16(tmp, 15));
- tmp = _mm_loadu_si128((__m128i const*)(src + col + 4 * cstride));
+ tmp = _mm_loadu_si128((__m128i const*)(src + col + 4 * stride));
a4 = _mm_unpacklo_epi16(tmp, _mm_srai_epi16(tmp, 15));
- tmp = _mm_loadu_si128((__m128i const*)(src + col + 5 * cstride));
+ tmp = _mm_loadu_si128((__m128i const*)(src + col + 5 * stride));
a5 = _mm_unpacklo_epi16(tmp, _mm_srai_epi16(tmp, 15));
- tmp = _mm_loadu_si128((__m128i const*)(src + col + 6 * cstride));
+ tmp = _mm_loadu_si128((__m128i const*)(src + col + 6 * stride));
a6 = _mm_unpacklo_epi16(tmp, _mm_srai_epi16(tmp, 15));
for (row = 0; row < block_height; row++)
@@ -467,13 +467,13 @@
}
template<int N>
-void filterVertical_pel_pel(int bitDepth, pixel *src, int srcStride, pixel *dst, int dstStride, int block_width, int block_height, short const *coeff)
+void filterVertical_p_p(int bitDepth, pixel *src, int srcStride, pixel *dst, int dstStride, int block_width, int block_height, short const *coeff)
{
int row, col;
- int cstride = srcStride;
+ int stride = srcStride;
- src -= (N / 2 - 1) * cstride;
+ src -= (N / 2 - 1) * stride;
int offset;
short maxVal;
@@ -512,7 +512,7 @@
Vec8s sum_first, sum_last;
row0.load(&src[col]);
- row1.load(&src[col + cstride]);
+ row1.load(&src[col + stride]);
c0 = cm[0];
c1 = cm[1];
@@ -530,8 +530,8 @@
sum_first = row0_first + row1_first;
sum_last = row0_last + row1_last;
- row2.load(&src[col + 2 * cstride]);
- row3.load(&src[col + 3 * cstride]);
+ row2.load(&src[col + 2 * stride]);
+ row3.load(&src[col + 3 * stride]);
c2 = cm[2];
c3 = cm[3];
@@ -549,8 +549,8 @@
if (N == 8)
{
- row4.load(&src[col + 4 * cstride]);
- row5.load(&src[col + 5 * cstride]);
+ row4.load(&src[col + 4 * stride]);
+ row5.load(&src[col + 5 * stride]);
c4 = cm[4];
c5 = cm[5];
@@ -566,8 +566,8 @@
sum_first += row0_first + row1_first;
sum_last += row0_last + row1_last;
- row6.load(&src[col + 6 * cstride]);
- row7.load(&src[col + 7 * cstride]);
+ row6.load(&src[col + 6 * stride]);
+ row7.load(&src[col + 7 * stride]);
c6 = cm[6];
c7 = cm[7];
@@ -607,7 +607,7 @@
Vec8s sum_first, sum_last;
row0.load(&src[col]);
- row1.load(&src[col + cstride]);
+ row1.load(&src[col + stride]);
c0 = cm[0];
c1 = cm[1];
@@ -619,8 +619,8 @@
sum_first = row0_first + row1_first;
- row2.load(&src[col + 2 * cstride]);
- row3.load(&src[col + 3 * cstride]);
+ row2.load(&src[col + 2 * stride]);
+ row3.load(&src[col + 3 * stride]);
c2 = cm[2];
c3 = cm[3];
@@ -634,8 +634,8 @@
if (N == 8)
{
- row4.load(&src[col + 4 * cstride]);
- row5.load(&src[col + 5 * cstride]);
+ row4.load(&src[col + 4 * stride]);
+ row5.load(&src[col + 5 * stride]);
c4 = cm[4];
c5 = cm[5];
@@ -646,8 +646,8 @@
row1_first = row1_first * c5;
sum_first += row0_first + row1_first;
- row6.load(&src[col + 6 * cstride]);
- row7.load(&src[col + 7 * cstride]);
+ row6.load(&src[col + 6 * stride]);
+ row7.load(&src[col + 7 * stride]);
c6 = cm[6];
c7 = cm[7];
@@ -667,18 +667,18 @@
sum.store_partial(block_width - col, dst + col);
}
src += srcStride;
dst += dstStride;
}
}
template<int N>
-void filterHorizontal_pel_pel(int bitDepth, pixel *src, int srcStride, pixel *dst, int dstStride, int block_width, int block_height, short const *coeff)
+void filterHorizontal_p_p(int bitDepth, pixel *src, int srcStride, pixel *dst, int dstStride, int block_width, int block_height, short const *coeff)
{
int row, col;
- src -= (N / 2 - 1); // Here cStride = 1
+ src -= (N / 2 - 1); // Here stride = 1
int offset;
short maxVal;
int headRoom = IF_INTERNAL_PREC - bitDepth;
@@ -750,13 +750,13 @@
dst[col] = (pixel)val;
}
src += srcStride;
dst += dstStride;
}
}
#if INSTRSET < 5
void filterHorizontalMultiplaneExtend(int /*bitDepth*/, pixel *src, int srcStride, short *dstF, short* dstA, short* dstB, short* dstC, int dstStride, pixel *pDstA, pixel *pDstB, pixel *pDstC, int pDstStride, int block_width, int block_height, int marginX, int marginY)
{
int row, col;
@@ -1021,7 +1021,7 @@
_mm_storeu_si128((__m128i*)(pDstB + row * pDstStride + block_width + marginX - 16), tmp16b);
_mm_storeu_si128((__m128i*)(pDstC + row * pDstStride + block_width + marginX - 16), tmp16c);
src += srcStride;
dstF += dstStride;
dstA += dstStride;
dstB += dstStride;
@@ -1052,7 +1052,7 @@
memcpy(pp - y * pDstStride, pp, block_width + marginX * 2);
}
#else
void filterHorizontalMultiplaneExtend(int /*bitDepth*/, pixel *src, int srcStride, short *dstF, short* dstA, short* dstB, short* dstC, int dstStride, pixel *pDstA, pixel *pDstB, pixel *pDstC, int pDstStride, int block_width, int block_height, int marginX, int marginY)
{
int row, col;
@@ -1322,7 +1322,7 @@
_mm_storeu_si128((__m128i*)(pDstB + row * pDstStride + block_width + marginX - 16), tmp16b);
_mm_storeu_si128((__m128i*)(pDstC + row * pDstStride + block_width + marginX - 16), tmp16c);
src += srcStride;
dstF += dstStride;
dstA += dstStride;
dstB += dstStride;
@@ -1355,7 +1355,7 @@
#endif
template<int N>
-void filterHorizontal_pel_short(int bitDepth, pixel *src, int srcStride, short *dst, int dstStride, int block_width, int block_height, short const *coeff)
+void filterHorizontal_p_s(int bitDepth, pixel *src, int srcStride, short *dst, int dstStride, int block_width, int block_height, short const *coeff)
{
int row, col;
@@ -1426,12 +1426,12 @@
dst[col] = val;
}
src += srcStride;
dst += dstStride;
}
}
void filterConvertPelToShort(int bitDepth, pixel *src, int srcStride, short *dst, int dstStride, int width, int height)
{
pixel* srcOrg = src;
short* dstOrg = dst;
@@ -1450,7 +1450,7 @@
dst_v.store(dst + col);
}
src += srcStride;
dst += dstStride;
}
@@ -1465,13 +1465,13 @@
val_v = extend_low(src_v) << shift;
dst_v = val_v - IF_INTERNAL_OFFS;
dst_v.store_partial(width - col, dst + col);
src += srcStride;
dst += dstStride;
}
}
}
void filterConvertShortToPel(int bitDepth, short *src, int srcStride, pixel *dst, int dstStride, int width, int height)
{
short* srcOrg = src;
pixel* dstOrg = dst;
@@ -1496,7 +1496,7 @@
val_uc.store_partial(8, dst + col);
}
src += srcStride;
dst += dstStride;
}
@@ -1513,7 +1513,7 @@
val_c = min(val_c, maxVal);
val_uc = compress(val_c, val_zero);
val_uc.store_partial(width - col, dst + col);
src += srcStride;
dst += dstStride;
}
}
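Finally, for the two conversion helpers at the end of ipfilter8.inc, here is the arithmetic reconstructed as scalar code from the shift-and-offset operations visible above. IF_INTERNAL_PREC is 14 in the usual HEVC convention but its value is not shown in this patch, and the rounding term in the short-to-pel direction is an assumption, so verify both against the full file:

    #include <algorithm>

    #define IF_INTERNAL_PREC 14                            // assumed value
    #define IF_INTERNAL_OFFS (1 << (IF_INTERNAL_PREC - 1))

    // pel -> short: scale into the internal precision and remove the
    // internal offset ("val_v = extend_low(src_v) << shift;
    //                   dst_v = val_v - IF_INTERNAL_OFFS" above).
    inline short pelToShort(int pel, int bitDepth)
    {
        int shift = IF_INTERNAL_PREC - bitDepth;           // headRoom
        return (short)((pel << shift) - IF_INTERNAL_OFFS);
    }

    // short -> pel: add the offset back, round (assumed), shift down,
    // and clip to [0, (1 << bitDepth) - 1] as maxVal does above.
    inline int shortToPel(short val, int bitDepth)
    {
        int shift  = IF_INTERNAL_PREC - bitDepth;
        int offset = IF_INTERNAL_OFFS + (1 << (shift - 1));
        return std::min(std::max((val + offset) >> shift, 0),
                        (1 << bitDepth) - 1);
    }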