[x265] [PATCH] primitive function for luma and chroma for-loops in addAvg()
dnyaneshwar at multicorewareinc.com
Wed Nov 13 13:06:41 CET 2013
# HG changeset patch
# User Dnyaneshwar Gorade <dnyaneshwar at multicorewareinc.com>
# Date 1384342556 -19800
# Wed Nov 13 17:05:56 2013 +0530
# Node ID c14e25a6ad035ac664548d6ada3e2e6e244bb3ce
# Parent c4ca80d19105ccf1ba2ec14dd65915f2820a660d
primitive function for luma and chroma for-loops in addAvg()
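
For context, the luma and chroma loops removed below both perform the same bi-prediction rounding, which is what the shared addAvg primitive now does for every partition size: each output pixel is Clip((src0[x] + src1[x] + offset) >> shiftNum). A minimal standalone sketch of that arithmetic follows; the IF_INTERNAL_PREC / IF_INTERNAL_OFFS values and the 8-bit depth are assumed here for illustration only, the real build takes them from the x265 headers.

// Standalone sketch of the rounding step performed by the addAvg primitive,
// assuming an 8-bit build (X265_DEPTH == 8) and the usual HEVC interpolation
// constants -- the real values come from x265's headers.
#include <algorithm>
#include <cstdint>

static const int IF_INTERNAL_PREC = 14;                          // assumed
static const int IF_INTERNAL_OFFS = 1 << (IF_INTERNAL_PREC - 1); // assumed
static const int X265_DEPTH       = 8;                           // assumed

static uint8_t clipPixel(int v)
{
    return (uint8_t)std::min(255, std::max(0, v));
}

// Reference form of a 4x4 addAvg block: average two int16_t prediction
// blocks (bi-prediction intermediates) into an 8-bit destination.
static void addAvgRef4x4(uint8_t* dst, intptr_t dstStride,
                         const int16_t* src0, intptr_t src0Stride,
                         const int16_t* src1, intptr_t src1Stride)
{
    int shiftNum = IF_INTERNAL_PREC + 1 - X265_DEPTH;
    int offset   = (1 << (shiftNum - 1)) + 2 * IF_INTERNAL_OFFS;

    for (int y = 0; y < 4; y++)
    {
        for (int x = 0; x < 4; x++)
            dst[x] = clipPixel((src0[x] + src1[x] + offset) >> shiftNum);

        src0 += src0Stride;
        src1 += src1Stride;
        dst  += dstStride;
    }
}

The templated addAvg<bx, by> added to pixel.cpp is the general form of this, instantiated once per LUMA_* partition.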
diff -r c4ca80d19105 -r c14e25a6ad03 source/Lib/TLibCommon/TComYuv.cpp
--- a/source/Lib/TLibCommon/TComYuv.cpp Tue Nov 12 19:10:23 2013 +0530
+++ b/source/Lib/TLibCommon/TComYuv.cpp Wed Nov 13 17:05:56 2013 +0530
@@ -590,7 +590,6 @@
void TComYuv::addAvg(TShortYUV* srcYuv0, TShortYUV* srcYuv1, uint32_t partUnitIdx, uint32_t width, uint32_t height, bool bLuma, bool bChroma)
{
- int x, y;
uint32_t src0Stride, src1Stride, dststride;
int shiftNum, offset;
@@ -614,20 +613,8 @@
shiftNum = IF_INTERNAL_PREC + 1 - X265_DEPTH;
offset = (1 << (shiftNum - 1)) + 2 * IF_INTERNAL_OFFS;
- for (y = 0; y < height; y++)
- {
- for (x = 0; x < width; x += 4)
- {
- dstY[x + 0] = ClipY((srcY0[x + 0] + srcY1[x + 0] + offset) >> shiftNum);
- dstY[x + 1] = ClipY((srcY0[x + 1] + srcY1[x + 1] + offset) >> shiftNum);
- dstY[x + 2] = ClipY((srcY0[x + 2] + srcY1[x + 2] + offset) >> shiftNum);
- dstY[x + 3] = ClipY((srcY0[x + 3] + srcY1[x + 3] + offset) >> shiftNum);
- }
-
- srcY0 += src0Stride;
- srcY1 += src1Stride;
- dstY += dststride;
- }
+ int part = partitionFromSizes(width, height);
+ primitives.addAvg[part](dstY, dststride, srcY0, src0Stride, srcY1, src1Stride);
}
if (bChroma)
{
@@ -641,26 +628,9 @@
width >>= m_hChromaShift;
height >>= m_vChromaShift;
- for (y = height - 1; y >= 0; y--)
- {
- for (x = width - 1; x >= 0; )
- {
- // note: chroma min width is 2
- dstU[x] = ClipC((srcU0[x] + srcU1[x] + offset) >> shiftNum);
- dstV[x] = ClipC((srcV0[x] + srcV1[x] + offset) >> shiftNum);
- x--;
- dstU[x] = ClipC((srcU0[x] + srcU1[x] + offset) >> shiftNum);
- dstV[x] = ClipC((srcV0[x] + srcV1[x] + offset) >> shiftNum);
- x--;
- }
-
- srcU0 += src0Stride;
- srcU1 += src1Stride;
- srcV0 += src0Stride;
- srcV1 += src1Stride;
- dstU += dststride;
- dstV += dststride;
- }
+ int part = partitionFromSizes(width, height);
+ primitives.addAvg[part](dstU, dststride, srcU0, src0Stride, srcU1, src1Stride);
+ primitives.addAvg[part](dstV, dststride, srcV0, src0Stride, srcV1, src1Stride);
}
}
diff -r c4ca80d19105 -r c14e25a6ad03 source/common/pixel.cpp
--- a/source/common/pixel.cpp Tue Nov 12 19:10:23 2013 +0530
+++ b/source/common/pixel.cpp Wed Nov 13 17:05:56 2013 +0530
@@ -794,6 +794,27 @@
a += dstride;
}
}
+
+template<int bx, int by>
+void addAvg(pixel* dst, intptr_t dstStride, int16_t* src0, intptr_t src0Stride, int16_t* src1, intptr_t src1Stride)
+{
+ int shiftNum, offset;
+ shiftNum = IF_INTERNAL_PREC + 1 - X265_DEPTH;
+ offset = (1 << (shiftNum - 1)) + 2 * IF_INTERNAL_OFFS;
+
+ for (int y = 0; y < by; y++)
+ {
+ for (int x = 0; x < bx; x += 2)
+ {
+ dst[x + 0] = ClipY((src0[x + 0] + src1[x + 0] + offset) >> shiftNum);
+ dst[x + 1] = ClipY((src0[x + 1] + src1[x + 1] + offset) >> shiftNum);
+ }
+
+ src0 += src0Stride;
+ src1 += src1Stride;
+ dst += dstStride;
+ }
+}
} // end anonymous namespace
namespace x265 {
@@ -807,6 +828,33 @@
SET_FUNC_PRIMITIVE_TABLE_C2(sad_x4)
SET_FUNC_PRIMITIVE_TABLE_C2(pixelavg_pp)
+ //addAvg
+ p.addAvg[LUMA_4x4] = addAvg<4, 4>;
+ p.addAvg[LUMA_8x8] = addAvg<8, 8>;
+ p.addAvg[LUMA_8x4] = addAvg<8, 4>;
+ p.addAvg[LUMA_4x8] = addAvg<4, 8>;
+ p.addAvg[LUMA_16x16] = addAvg<16, 16>;
+ p.addAvg[LUMA_16x8] = addAvg<16, 8>;
+ p.addAvg[LUMA_8x16] = addAvg<8, 16>;
+ p.addAvg[LUMA_16x12] = addAvg<16, 12>;
+ p.addAvg[LUMA_12x16] = addAvg<12, 16>;
+ p.addAvg[LUMA_16x4] = addAvg<16, 4>;
+ p.addAvg[LUMA_4x16] = addAvg<4, 16>;
+ p.addAvg[LUMA_32x32] = addAvg<32, 32>;
+ p.addAvg[LUMA_32x16] = addAvg<32, 16>;
+ p.addAvg[LUMA_16x32] = addAvg<16, 32>;
+ p.addAvg[LUMA_32x24] = addAvg<32, 24>;
+ p.addAvg[LUMA_24x32] = addAvg<24, 32>;
+ p.addAvg[LUMA_32x8] = addAvg<32, 8>;
+ p.addAvg[LUMA_8x32] = addAvg<8, 32>;
+ p.addAvg[LUMA_64x64] = addAvg<64, 64>;
+ p.addAvg[LUMA_64x32] = addAvg<64, 32>;
+ p.addAvg[LUMA_32x64] = addAvg<32, 64>;
+ p.addAvg[LUMA_64x48] = addAvg<64, 48>;
+ p.addAvg[LUMA_48x64] = addAvg<48, 64>;
+ p.addAvg[LUMA_64x16] = addAvg<64, 16>;
+ p.addAvg[LUMA_16x64] = addAvg<16, 64>;
+
// satd
p.satd[LUMA_4x4] = satd_4x4;
p.satd[LUMA_8x8] = satd8<8, 8>;
diff -r c4ca80d19105 -r c14e25a6ad03 source/common/primitives.h
--- a/source/common/primitives.h Tue Nov 12 19:10:23 2013 +0530
+++ b/source/common/primitives.h Wed Nov 13 17:05:56 2013 +0530
@@ -208,6 +208,8 @@
typedef void (*pixel_sub_ps_t)(int16_t *dst, intptr_t dstride, pixel *src0, pixel *src1, intptr_t sstride0, intptr_t sstride1);
+typedef void (*addAvg_t)(pixel* dst, intptr_t dstStride, int16_t* src0, intptr_t src0Stride, int16_t* src1, intptr_t src1Stride);
+
/* Define a structure containing function pointers to optimized encoder
* primitives. Each pointer can reference either an assembly routine,
* a vectorized primitive, or a C function. */
@@ -288,6 +290,8 @@
var_t var[NUM_LUMA_PARTITIONS];
ssim_4x4x2_core_t ssim_4x4x2_core;
plane_copy_deinterleave_t plane_copy_deinterleave_c;
+
+ addAvg_t addAvg[NUM_LUMA_PARTITIONS];
};
/* This copy of the table is what gets used by the encoder.