[x265] [PATCH 1 of 7] AQ: Re-enable fine-grained adaptive quantization
deepthi at multicorewareinc.com
Sat Apr 18 08:22:03 CEST 2015
# HG changeset patch
# User Deepthi Nandakumar <deepthi at multicorewareinc.com>
# Date 1429335968 -19800
# Sat Apr 18 11:16:08 2015 +0530
# Node ID c20bac870095b72d025f7f2916bd82844e586004
# Parent 3ec6052eaf9c1c1e3a280fa6d3fb392902b2a849
AQ: Re-enable fine-grained adaptive quantization

Derive maxCuDQPDepth from the quantization group size (qgSize), carry per-depth
QPs down the analysis recursion via initSubCU(), and account for the delta-QP
signalling bits in the RD cost (checkDQP / checkDQPForSplitPred).
diff -r 3ec6052eaf9c -r c20bac870095 source/common/cudata.cpp
--- a/source/common/cudata.cpp Fri Apr 17 14:02:26 2015 -0700
+++ b/source/common/cudata.cpp Sat Apr 18 11:16:08 2015 +0530
@@ -298,7 +298,7 @@
}
// initialize Sub partition
-void CUData::initSubCU(const CUData& ctu, const CUGeom& cuGeom)
+void CUData::initSubCU(const CUData& ctu, const CUGeom& cuGeom, int qp)
{
m_absIdxInCTU = cuGeom.absPartIdx;
m_encData = ctu.m_encData;
@@ -312,8 +312,8 @@
m_cuAboveRight = ctu.m_cuAboveRight;
X265_CHECK(m_numPartitions == cuGeom.numPartitions, "initSubCU() size mismatch\n");
- /* sequential memsets */
- m_partSet((uint8_t*)m_qp, (uint8_t)ctu.m_qp[0]);
+ m_partSet((uint8_t*)m_qp, (uint8_t)qp);
+
m_partSet(m_log2CUSize, (uint8_t)cuGeom.log2CUSize);
m_partSet(m_lumaIntraDir, (uint8_t)DC_IDX);
m_partSet(m_tqBypass, (uint8_t)m_encData->m_param->bLossless);
diff -r 3ec6052eaf9c -r c20bac870095 source/common/cudata.h
--- a/source/common/cudata.h Fri Apr 17 14:02:26 2015 -0700
+++ b/source/common/cudata.h Sat Apr 18 11:16:08 2015 +0530
@@ -182,7 +182,7 @@
static void calcCTUGeoms(uint32_t ctuWidth, uint32_t ctuHeight, uint32_t maxCUSize, uint32_t minCUSize, CUGeom cuDataArray[CUGeom::MAX_GEOMS]);
void initCTU(const Frame& frame, uint32_t cuAddr, int qp);
- void initSubCU(const CUData& ctu, const CUGeom& cuGeom);
+ void initSubCU(const CUData& ctu, const CUGeom& cuGeom, int qp);
void initLosslessCU(const CUData& cu, const CUGeom& cuGeom);
void copyPartFrom(const CUData& cu, const CUGeom& childGeom, uint32_t subPartIdx);
diff -r 3ec6052eaf9c -r c20bac870095 source/encoder/analysis.cpp
--- a/source/encoder/analysis.cpp Fri Apr 17 14:02:26 2015 -0700
+++ b/source/encoder/analysis.cpp Sat Apr 18 11:16:08 2015 +0530
@@ -75,6 +75,8 @@
m_reuseInterDataCTU = NULL;
m_reuseRef = NULL;
m_reuseBestMergeCand = NULL;
+ for (int i = 0; i < NUM_CU_DEPTH; i++)
+ m_qp[i] = NULL;
}
bool Analysis::create(ThreadLocalData *tld)
@@ -101,9 +103,12 @@
ok &= md.pred[j].reconYuv.create(cuSize, csp);
md.pred[j].fencYuv = &md.fencYuv;
}
+ CHECKED_MALLOC(m_qp[depth], int, (size_t)1 << (depth << 1));
}
return ok;
+fail:
+ return false;
}
void Analysis::destroy()
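For reference, each m_qp[depth] table is sized (size_t)1 << (depth << 1), i.e. 4^depth entries, one QP
per CU of that depth within the CTU. Worked-out sizes assuming a 64x64 CTU (illustration only, not part
of the patch):

    /* depth 0: 1 << 0 = 1  QP  (the 64x64 CTU itself)
       depth 1: 1 << 2 = 4  QPs (32x32 CUs)
       depth 2: 1 << 4 = 16 QPs (16x16 CUs)
       depth 3: 1 << 6 = 64 QPs (8x8 CUs, unused until 8x8 QGs are supported) */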
@@ -118,6 +123,7 @@
m_modeDepth[i].pred[j].predYuv.destroy();
m_modeDepth[i].pred[j].reconYuv.destroy();
}
+ X265_FREE(m_qp[i]);
}
}
@@ -132,6 +138,38 @@
m_modeDepth[i].pred[j].invalidate();
#endif
invalidateContexts(0);
+ if (m_slice->m_pps->bUseDQP)
+ {
+ CUGeom *curCUGeom = (CUGeom *)&cuGeom;
+ CUGeom *parentGeom = (CUGeom *)&cuGeom;
+
+ /* TODO: In the future, we should extend this to 8x8 QGs as well, since that is the minimum size
+ allowed by the HEVC standard. The AQ offset calculation will then need to be done at 8x8 granularity,
+ and this section will need to be reworked accordingly. */
+
+ m_qp[0][0] = calculateQpforCuSize(ctu, *curCUGeom);
+ curCUGeom = curCUGeom + curCUGeom->childOffset;
+ parentGeom = curCUGeom;
+ if (m_slice->m_pps->maxCuDQPDepth >= 1)
+ {
+ for (int i = 0; i < 4; i++)
+ {
+ m_qp[1][i] = calculateQpforCuSize(ctu, *(parentGeom + i));
+ if (m_slice->m_pps->maxCuDQPDepth == 2)
+ {
+ curCUGeom = parentGeom + i + (parentGeom + i)->childOffset;
+ for (int j = 0; j < 4; j++)
+ m_qp[2][i * 4 + j] = calculateQpforCuSize(ctu, *(curCUGeom + j));
+ }
+ }
+ }
+ this->setQP(*m_slice, m_qp[0][0]);
+ m_qp[0][0] = x265_clip3(QP_MIN, QP_MAX_SPEC, m_qp[0][0]);
+ ctu.setQPSubParts((int8_t)m_qp[0][0], 0, 0);
+ }
+ else
+ m_qp[0][0] = m_slice->m_sliceQp;
+
m_quant.setQPforQuant(ctu);
m_rqt[0].cur.load(initialContext);
m_modeDepth[0].fencYuv.copyFromPicYuv(*m_frame->m_fencPic, ctu.m_cuAddr, 0);
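The block above fills m_qp[depth][partIdx] by walking the CUGeom tree: depth 0 holds the CTU-level QP,
and each deeper level is addressed with partIdx * 4 + subPartIdx, the same index math the compress*CU
recursions use below. A minimal recursive sketch of the equivalent fill, assuming calculateQpforCuSize()
and CUGeom::childOffset behave as used above (hypothetical helper, not part of the patch):

    void Analysis::fillCuQPs(const CUData& ctu, const CUGeom& geom, uint32_t depth, uint32_t partIdx)
    {
        m_qp[depth][partIdx] = calculateQpforCuSize(ctu, geom);        // AQ-adjusted QP for this CU
        if (depth < (uint32_t)m_slice->m_pps->maxCuDQPDepth)
        {
            const CUGeom* child = &geom + geom.childOffset;            // four children stored contiguously
            for (uint32_t i = 0; i < 4; i++)
                fillCuQPs(ctu, child[i], depth + 1, partIdx * 4 + i);  // same partIdx*4+i addressing
        }
    }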
@@ -155,7 +193,7 @@
uint32_t zOrder = 0;
if (m_slice->m_sliceType == I_SLICE)
{
- compressIntraCU(ctu, cuGeom, zOrder);
+ compressIntraCU(ctu, cuGeom, zOrder, m_qp[0][0], 0);
if (m_param->analysisMode == X265_ANALYSIS_SAVE && m_frame->m_analysisData.intraData)
{
CUData *bestCU = &m_modeDepth[0].bestMode->cu;
@@ -173,18 +211,18 @@
* they are available for intra predictions */
m_modeDepth[0].fencYuv.copyToPicYuv(*m_frame->m_reconPic, ctu.m_cuAddr, 0);
- compressInterCU_rd0_4(ctu, cuGeom);
+ compressInterCU_rd0_4(ctu, cuGeom, m_qp[0][0], 0);
/* generate residual for entire CTU at once and copy to reconPic */
encodeResidue(ctu, cuGeom);
}
else if (m_param->bDistributeModeAnalysis && m_param->rdLevel >= 2)
- compressInterCU_dist(ctu, cuGeom);
+ compressInterCU_dist(ctu, cuGeom, m_qp[0][0], 0);
else if (m_param->rdLevel <= 4)
- compressInterCU_rd0_4(ctu, cuGeom);
+ compressInterCU_rd0_4(ctu, cuGeom, m_qp[0][0], 0);
else
{
- compressInterCU_rd5_6(ctu, cuGeom, zOrder);
+ compressInterCU_rd5_6(ctu, cuGeom, zOrder, m_qp[0][0], 0);
if (m_param->analysisMode == X265_ANALYSIS_SAVE && m_frame->m_analysisData.interData)
{
CUData *bestCU = &m_modeDepth[0].bestMode->cu;
@@ -223,7 +261,7 @@
}
}
-void Analysis::compressIntraCU(const CUData& parentCTU, const CUGeom& cuGeom, uint32_t& zOrder)
+void Analysis::compressIntraCU(const CUData& parentCTU, const CUGeom& cuGeom, uint32_t& zOrder, int32_t qp, uint32_t partIdx)
{
uint32_t depth = cuGeom.depth;
ModeDepth& md = m_modeDepth[depth];
@@ -232,6 +270,13 @@
bool mightSplit = !(cuGeom.flags & CUGeom::LEAF);
bool mightNotSplit = !(cuGeom.flags & CUGeom::SPLIT_MANDATORY);
+ if (m_slice->m_pps->bUseDQP && depth && depth <= m_slice->m_pps->maxCuDQPDepth)
+ {
+ qp = m_qp[depth][partIdx];
+ this->setQP(*m_slice, qp);
+ qp = x265_clip3(QP_MIN, QP_MAX_SPEC, qp);
+ }
+
if (m_param->analysisMode == X265_ANALYSIS_LOAD)
{
uint8_t* reuseDepth = &m_reuseIntraDataCTU->depth[parentCTU.m_cuAddr * parentCTU.m_numPartitions];
@@ -241,11 +286,10 @@
if (mightNotSplit && depth == reuseDepth[zOrder] && zOrder == cuGeom.absPartIdx)
{
- m_quant.setQPforQuant(parentCTU);
-
PartSize size = (PartSize)reusePartSizes[zOrder];
Mode& mode = size == SIZE_2Nx2N ? md.pred[PRED_INTRA] : md.pred[PRED_INTRA_NxN];
- mode.cu.initSubCU(parentCTU, cuGeom);
+ mode.cu.initSubCU(parentCTU, cuGeom, qp);
+ m_quant.setQPforQuant(mode.cu);
checkIntra(mode, cuGeom, size, &reuseModes[zOrder], &reuseChromaModes[zOrder]);
checkBestMode(mode, depth);
@@ -262,15 +306,14 @@
}
else if (mightNotSplit)
{
- m_quant.setQPforQuant(parentCTU);
-
- md.pred[PRED_INTRA].cu.initSubCU(parentCTU, cuGeom);
+ md.pred[PRED_INTRA].cu.initSubCU(parentCTU, cuGeom, qp);
+ m_quant.setQPforQuant(md.pred[PRED_INTRA].cu);
checkIntra(md.pred[PRED_INTRA], cuGeom, SIZE_2Nx2N, NULL, NULL);
checkBestMode(md.pred[PRED_INTRA], depth);
if (cuGeom.log2CUSize == 3 && m_slice->m_sps->quadtreeTULog2MinSize < 3)
{
- md.pred[PRED_INTRA_NxN].cu.initSubCU(parentCTU, cuGeom);
+ md.pred[PRED_INTRA_NxN].cu.initSubCU(parentCTU, cuGeom, qp);
checkIntra(md.pred[PRED_INTRA_NxN], cuGeom, SIZE_NxN, NULL, NULL);
checkBestMode(md.pred[PRED_INTRA_NxN], depth);
}
@@ -287,7 +330,7 @@
Mode* splitPred = &md.pred[PRED_SPLIT];
splitPred->initCosts();
CUData* splitCU = &splitPred->cu;
- splitCU->initSubCU(parentCTU, cuGeom);
+ splitCU->initSubCU(parentCTU, cuGeom, qp);
uint32_t nextDepth = depth + 1;
ModeDepth& nd = m_modeDepth[nextDepth];
@@ -301,7 +344,7 @@
{
m_modeDepth[0].fencYuv.copyPartToYuv(nd.fencYuv, childGeom.absPartIdx);
m_rqt[nextDepth].cur.load(*nextContext);
- compressIntraCU(parentCTU, childGeom, zOrder);
+ compressIntraCU(parentCTU, childGeom, zOrder, qp, partIdx * 4 + subPartIdx);
// Save best CU and pred data for this sub CU
splitCU->copyPartFrom(nd.bestMode->cu, childGeom, subPartIdx);
@@ -322,7 +365,7 @@
else
updateModeCost(*splitPred);
- checkDQPForSplitPred(splitPred->cu, cuGeom);
+ checkDQPForSplitPred(*splitPred, cuGeom);
checkBestMode(*splitPred, depth);
}
@@ -490,7 +533,7 @@
while (task >= 0);
}
-void Analysis::compressInterCU_dist(const CUData& parentCTU, const CUGeom& cuGeom)
+void Analysis::compressInterCU_dist(const CUData& parentCTU, const CUGeom& cuGeom, int32_t qp, uint32_t partIdx)
{
uint32_t depth = cuGeom.depth;
uint32_t cuAddr = parentCTU.m_cuAddr;
@@ -503,6 +546,13 @@
X265_CHECK(m_param->rdLevel >= 2, "compressInterCU_dist does not support RD 0 or 1\n");
+ if (m_slice->m_pps->bUseDQP && depth && depth <= m_slice->m_pps->maxCuDQPDepth)
+ {
+ qp = m_qp[depth][partIdx];
+ this->setQP(*m_slice, qp);
+ qp = x265_clip3(QP_MIN, QP_MAX_SPEC, qp);
+ }
+
if (mightNotSplit && depth >= minDepth)
{
int bTryAmp = m_slice->m_sps->maxAMPDepth > depth && (cuGeom.log2CUSize < 6 || m_param->rdLevel > 4);
@@ -511,28 +561,28 @@
PMODE pmode(*this, cuGeom);
/* Initialize all prediction CUs based on parentCTU */
- md.pred[PRED_MERGE].cu.initSubCU(parentCTU, cuGeom);
- md.pred[PRED_SKIP].cu.initSubCU(parentCTU, cuGeom);
+ md.pred[PRED_MERGE].cu.initSubCU(parentCTU, cuGeom, qp);
+ md.pred[PRED_SKIP].cu.initSubCU(parentCTU, cuGeom, qp);
if (bTryIntra)
{
- md.pred[PRED_INTRA].cu.initSubCU(parentCTU, cuGeom);
+ md.pred[PRED_INTRA].cu.initSubCU(parentCTU, cuGeom, qp);
if (cuGeom.log2CUSize == 3 && m_slice->m_sps->quadtreeTULog2MinSize < 3 && m_param->rdLevel >= 5)
- md.pred[PRED_INTRA_NxN].cu.initSubCU(parentCTU, cuGeom);
+ md.pred[PRED_INTRA_NxN].cu.initSubCU(parentCTU, cuGeom, qp);
pmode.modes[pmode.m_jobTotal++] = PRED_INTRA;
}
- md.pred[PRED_2Nx2N].cu.initSubCU(parentCTU, cuGeom); pmode.modes[pmode.m_jobTotal++] = PRED_2Nx2N;
- md.pred[PRED_BIDIR].cu.initSubCU(parentCTU, cuGeom);
+ md.pred[PRED_2Nx2N].cu.initSubCU(parentCTU, cuGeom, qp); pmode.modes[pmode.m_jobTotal++] = PRED_2Nx2N;
+ md.pred[PRED_BIDIR].cu.initSubCU(parentCTU, cuGeom, qp);
if (m_param->bEnableRectInter)
{
- md.pred[PRED_2NxN].cu.initSubCU(parentCTU, cuGeom); pmode.modes[pmode.m_jobTotal++] = PRED_2NxN;
- md.pred[PRED_Nx2N].cu.initSubCU(parentCTU, cuGeom); pmode.modes[pmode.m_jobTotal++] = PRED_Nx2N;
+ md.pred[PRED_2NxN].cu.initSubCU(parentCTU, cuGeom, qp); pmode.modes[pmode.m_jobTotal++] = PRED_2NxN;
+ md.pred[PRED_Nx2N].cu.initSubCU(parentCTU, cuGeom, qp); pmode.modes[pmode.m_jobTotal++] = PRED_Nx2N;
}
if (bTryAmp)
{
- md.pred[PRED_2NxnU].cu.initSubCU(parentCTU, cuGeom); pmode.modes[pmode.m_jobTotal++] = PRED_2NxnU;
- md.pred[PRED_2NxnD].cu.initSubCU(parentCTU, cuGeom); pmode.modes[pmode.m_jobTotal++] = PRED_2NxnD;
- md.pred[PRED_nLx2N].cu.initSubCU(parentCTU, cuGeom); pmode.modes[pmode.m_jobTotal++] = PRED_nLx2N;
- md.pred[PRED_nRx2N].cu.initSubCU(parentCTU, cuGeom); pmode.modes[pmode.m_jobTotal++] = PRED_nRx2N;
+ md.pred[PRED_2NxnU].cu.initSubCU(parentCTU, cuGeom, qp); pmode.modes[pmode.m_jobTotal++] = PRED_2NxnU;
+ md.pred[PRED_2NxnD].cu.initSubCU(parentCTU, cuGeom, qp); pmode.modes[pmode.m_jobTotal++] = PRED_2NxnD;
+ md.pred[PRED_nLx2N].cu.initSubCU(parentCTU, cuGeom, qp); pmode.modes[pmode.m_jobTotal++] = PRED_nLx2N;
+ md.pred[PRED_nRx2N].cu.initSubCU(parentCTU, cuGeom, qp); pmode.modes[pmode.m_jobTotal++] = PRED_nRx2N;
}
pmode.tryBondPeers(*m_frame->m_encData->m_jobProvider, pmode.m_jobTotal);
@@ -662,7 +712,7 @@
if (md.bestMode->rdCost == MAX_INT64 && !bTryIntra)
{
- md.pred[PRED_INTRA].cu.initSubCU(parentCTU, cuGeom);
+ md.pred[PRED_INTRA].cu.initSubCU(parentCTU, cuGeom, qp);
checkIntraInInter(md.pred[PRED_INTRA], cuGeom);
encodeIntraInInter(md.pred[PRED_INTRA], cuGeom);
checkBestMode(md.pred[PRED_INTRA], depth);
@@ -688,7 +738,7 @@
Mode* splitPred = &md.pred[PRED_SPLIT];
splitPred->initCosts();
CUData* splitCU = &splitPred->cu;
- splitCU->initSubCU(parentCTU, cuGeom);
+ splitCU->initSubCU(parentCTU, cuGeom, qp);
uint32_t nextDepth = depth + 1;
ModeDepth& nd = m_modeDepth[nextDepth];
@@ -702,7 +752,7 @@
{
m_modeDepth[0].fencYuv.copyPartToYuv(nd.fencYuv, childGeom.absPartIdx);
m_rqt[nextDepth].cur.load(*nextContext);
- compressInterCU_dist(parentCTU, childGeom);
+ compressInterCU_dist(parentCTU, childGeom, qp, partIdx * 4 + subPartIdx);
// Save best CU and pred data for this sub CU
splitCU->copyPartFrom(nd.bestMode->cu, childGeom, subPartIdx);
@@ -721,7 +771,7 @@
else
updateModeCost(*splitPred);
- checkDQPForSplitPred(splitPred->cu, cuGeom);
+ checkDQPForSplitPred(*splitPred, cuGeom);
checkBestMode(*splitPred, depth);
}
@@ -741,7 +791,7 @@
md.bestMode->reconYuv.copyToPicYuv(*m_frame->m_reconPic, cuAddr, cuGeom.absPartIdx);
}
-void Analysis::compressInterCU_rd0_4(const CUData& parentCTU, const CUGeom& cuGeom)
+void Analysis::compressInterCU_rd0_4(const CUData& parentCTU, const CUGeom& cuGeom, int32_t qp, uint32_t partIdx)
{
uint32_t depth = cuGeom.depth;
uint32_t cuAddr = parentCTU.m_cuAddr;
@@ -752,13 +802,20 @@
bool mightNotSplit = !(cuGeom.flags & CUGeom::SPLIT_MANDATORY);
uint32_t minDepth = topSkipMinDepth(parentCTU, cuGeom);
+ if (m_slice->m_pps->bUseDQP && depth && depth <= m_slice->m_pps->maxCuDQPDepth)
+ {
+ qp = m_qp[depth][partIdx];
+ this->setQP(*m_slice, qp);
+ qp = x265_clip3(QP_MIN, QP_MAX_SPEC, qp);
+ }
+
if (mightNotSplit && depth >= minDepth)
{
bool bTryIntra = m_slice->m_sliceType != B_SLICE || m_param->bIntraInBFrames;
/* Compute Merge Cost */
- md.pred[PRED_MERGE].cu.initSubCU(parentCTU, cuGeom);
- md.pred[PRED_SKIP].cu.initSubCU(parentCTU, cuGeom);
+ md.pred[PRED_MERGE].cu.initSubCU(parentCTU, cuGeom, qp);
+ md.pred[PRED_SKIP].cu.initSubCU(parentCTU, cuGeom, qp);
checkMerge2Nx2N_rd0_4(md.pred[PRED_SKIP], md.pred[PRED_MERGE], cuGeom);
bool earlyskip = false;
@@ -767,24 +824,24 @@
if (!earlyskip)
{
- md.pred[PRED_2Nx2N].cu.initSubCU(parentCTU, cuGeom);
+ md.pred[PRED_2Nx2N].cu.initSubCU(parentCTU, cuGeom, qp);
checkInter_rd0_4(md.pred[PRED_2Nx2N], cuGeom, SIZE_2Nx2N);
if (m_slice->m_sliceType == B_SLICE)
{
- md.pred[PRED_BIDIR].cu.initSubCU(parentCTU, cuGeom);
+ md.pred[PRED_BIDIR].cu.initSubCU(parentCTU, cuGeom, qp);
checkBidir2Nx2N(md.pred[PRED_2Nx2N], md.pred[PRED_BIDIR], cuGeom);
}
Mode *bestInter = &md.pred[PRED_2Nx2N];
if (m_param->bEnableRectInter)
{
- md.pred[PRED_Nx2N].cu.initSubCU(parentCTU, cuGeom);
+ md.pred[PRED_Nx2N].cu.initSubCU(parentCTU, cuGeom, qp);
checkInter_rd0_4(md.pred[PRED_Nx2N], cuGeom, SIZE_Nx2N);
if (md.pred[PRED_Nx2N].sa8dCost < bestInter->sa8dCost)
bestInter = &md.pred[PRED_Nx2N];
- md.pred[PRED_2NxN].cu.initSubCU(parentCTU, cuGeom);
+ md.pred[PRED_2NxN].cu.initSubCU(parentCTU, cuGeom, qp);
checkInter_rd0_4(md.pred[PRED_2NxN], cuGeom, SIZE_2NxN);
if (md.pred[PRED_2NxN].sa8dCost < bestInter->sa8dCost)
bestInter = &md.pred[PRED_2NxN];
@@ -806,24 +863,24 @@
if (bHor)
{
- md.pred[PRED_2NxnU].cu.initSubCU(parentCTU, cuGeom);
+ md.pred[PRED_2NxnU].cu.initSubCU(parentCTU, cuGeom, qp);
checkInter_rd0_4(md.pred[PRED_2NxnU], cuGeom, SIZE_2NxnU);
if (md.pred[PRED_2NxnU].sa8dCost < bestInter->sa8dCost)
bestInter = &md.pred[PRED_2NxnU];
- md.pred[PRED_2NxnD].cu.initSubCU(parentCTU, cuGeom);
+ md.pred[PRED_2NxnD].cu.initSubCU(parentCTU, cuGeom, qp);
checkInter_rd0_4(md.pred[PRED_2NxnD], cuGeom, SIZE_2NxnD);
if (md.pred[PRED_2NxnD].sa8dCost < bestInter->sa8dCost)
bestInter = &md.pred[PRED_2NxnD];
}
if (bVer)
{
- md.pred[PRED_nLx2N].cu.initSubCU(parentCTU, cuGeom);
+ md.pred[PRED_nLx2N].cu.initSubCU(parentCTU, cuGeom, qp);
checkInter_rd0_4(md.pred[PRED_nLx2N], cuGeom, SIZE_nLx2N);
if (md.pred[PRED_nLx2N].sa8dCost < bestInter->sa8dCost)
bestInter = &md.pred[PRED_nLx2N];
- md.pred[PRED_nRx2N].cu.initSubCU(parentCTU, cuGeom);
+ md.pred[PRED_nRx2N].cu.initSubCU(parentCTU, cuGeom, qp);
checkInter_rd0_4(md.pred[PRED_nRx2N], cuGeom, SIZE_nRx2N);
if (md.pred[PRED_nRx2N].sa8dCost < bestInter->sa8dCost)
bestInter = &md.pred[PRED_nRx2N];
@@ -855,7 +912,7 @@
if ((bTryIntra && md.bestMode->cu.getQtRootCbf(0)) ||
md.bestMode->sa8dCost == MAX_INT64)
{
- md.pred[PRED_INTRA].cu.initSubCU(parentCTU, cuGeom);
+ md.pred[PRED_INTRA].cu.initSubCU(parentCTU, cuGeom, qp);
checkIntraInInter(md.pred[PRED_INTRA], cuGeom);
encodeIntraInInter(md.pred[PRED_INTRA], cuGeom);
checkBestMode(md.pred[PRED_INTRA], depth);
@@ -873,7 +930,7 @@
if (bTryIntra || md.bestMode->sa8dCost == MAX_INT64)
{
- md.pred[PRED_INTRA].cu.initSubCU(parentCTU, cuGeom);
+ md.pred[PRED_INTRA].cu.initSubCU(parentCTU, cuGeom, qp);
checkIntraInInter(md.pred[PRED_INTRA], cuGeom);
if (md.pred[PRED_INTRA].sa8dCost < md.bestMode->sa8dCost)
md.bestMode = &md.pred[PRED_INTRA];
@@ -960,7 +1017,7 @@
Mode* splitPred = &md.pred[PRED_SPLIT];
splitPred->initCosts();
CUData* splitCU = &splitPred->cu;
- splitCU->initSubCU(parentCTU, cuGeom);
+ splitCU->initSubCU(parentCTU, cuGeom, qp);
uint32_t nextDepth = depth + 1;
ModeDepth& nd = m_modeDepth[nextDepth];
@@ -974,7 +1031,7 @@
{
m_modeDepth[0].fencYuv.copyPartToYuv(nd.fencYuv, childGeom.absPartIdx);
m_rqt[nextDepth].cur.load(*nextContext);
- compressInterCU_rd0_4(parentCTU, childGeom);
+ compressInterCU_rd0_4(parentCTU, childGeom, qp, partIdx * 4 + subPartIdx);
// Save best CU and pred data for this sub CU
splitCU->copyPartFrom(nd.bestMode->cu, childGeom, subPartIdx);
@@ -1006,7 +1063,7 @@
else if (splitPred->sa8dCost < md.bestMode->sa8dCost)
md.bestMode = splitPred;
- checkDQPForSplitPred(md.bestMode->cu, cuGeom);
+ checkDQPForSplitPred(*md.bestMode, cuGeom);
}
if (mightNotSplit)
{
@@ -1025,7 +1082,7 @@
md.bestMode->reconYuv.copyToPicYuv(*m_frame->m_reconPic, cuAddr, cuGeom.absPartIdx);
}
-void Analysis::compressInterCU_rd5_6(const CUData& parentCTU, const CUGeom& cuGeom, uint32_t &zOrder)
+void Analysis::compressInterCU_rd5_6(const CUData& parentCTU, const CUGeom& cuGeom, uint32_t &zOrder, int32_t qp, uint32_t partIdx)
{
uint32_t depth = cuGeom.depth;
ModeDepth& md = m_modeDepth[depth];
@@ -1034,14 +1091,21 @@
bool mightSplit = !(cuGeom.flags & CUGeom::LEAF);
bool mightNotSplit = !(cuGeom.flags & CUGeom::SPLIT_MANDATORY);
+ if (m_slice->m_pps->bUseDQP && depth && depth <= m_slice->m_pps->maxCuDQPDepth)
+ {
+ qp = m_qp[depth][partIdx];
+ this->setQP(*m_slice, qp);
+ qp = x265_clip3(QP_MIN, QP_MAX_SPEC, qp);
+ }
+
if (m_param->analysisMode == X265_ANALYSIS_LOAD)
{
uint8_t* reuseDepth = &m_reuseInterDataCTU->depth[parentCTU.m_cuAddr * parentCTU.m_numPartitions];
uint8_t* reuseModes = &m_reuseInterDataCTU->modes[parentCTU.m_cuAddr * parentCTU.m_numPartitions];
if (mightNotSplit && depth == reuseDepth[zOrder] && zOrder == cuGeom.absPartIdx && reuseModes[zOrder] == MODE_SKIP)
{
- md.pred[PRED_SKIP].cu.initSubCU(parentCTU, cuGeom);
- md.pred[PRED_MERGE].cu.initSubCU(parentCTU, cuGeom);
+ md.pred[PRED_SKIP].cu.initSubCU(parentCTU, cuGeom, qp);
+ md.pred[PRED_MERGE].cu.initSubCU(parentCTU, cuGeom, qp);
checkMerge2Nx2N_rd5_6(md.pred[PRED_SKIP], md.pred[PRED_MERGE], cuGeom, true);
if (m_bTryLossless)
@@ -1060,20 +1124,20 @@
if (mightNotSplit)
{
- md.pred[PRED_SKIP].cu.initSubCU(parentCTU, cuGeom);
- md.pred[PRED_MERGE].cu.initSubCU(parentCTU, cuGeom);
+ md.pred[PRED_SKIP].cu.initSubCU(parentCTU, cuGeom, qp);
+ md.pred[PRED_MERGE].cu.initSubCU(parentCTU, cuGeom, qp);
checkMerge2Nx2N_rd5_6(md.pred[PRED_SKIP], md.pred[PRED_MERGE], cuGeom, false);
bool earlySkip = m_param->bEnableEarlySkip && md.bestMode && !md.bestMode->cu.getQtRootCbf(0);
if (!earlySkip)
{
- md.pred[PRED_2Nx2N].cu.initSubCU(parentCTU, cuGeom);
+ md.pred[PRED_2Nx2N].cu.initSubCU(parentCTU, cuGeom, qp);
checkInter_rd5_6(md.pred[PRED_2Nx2N], cuGeom, SIZE_2Nx2N, false);
checkBestMode(md.pred[PRED_2Nx2N], cuGeom.depth);
if (m_slice->m_sliceType == B_SLICE)
{
- md.pred[PRED_BIDIR].cu.initSubCU(parentCTU, cuGeom);
+ md.pred[PRED_BIDIR].cu.initSubCU(parentCTU, cuGeom, qp);
checkBidir2Nx2N(md.pred[PRED_2Nx2N], md.pred[PRED_BIDIR], cuGeom);
if (md.pred[PRED_BIDIR].sa8dCost < MAX_INT64)
{
@@ -1084,11 +1148,11 @@
if (m_param->bEnableRectInter)
{
- md.pred[PRED_Nx2N].cu.initSubCU(parentCTU, cuGeom);
+ md.pred[PRED_Nx2N].cu.initSubCU(parentCTU, cuGeom, qp);
checkInter_rd5_6(md.pred[PRED_Nx2N], cuGeom, SIZE_Nx2N, false);
checkBestMode(md.pred[PRED_Nx2N], cuGeom.depth);
- md.pred[PRED_2NxN].cu.initSubCU(parentCTU, cuGeom);
+ md.pred[PRED_2NxN].cu.initSubCU(parentCTU, cuGeom, qp);
checkInter_rd5_6(md.pred[PRED_2NxN], cuGeom, SIZE_2NxN, false);
checkBestMode(md.pred[PRED_2NxN], cuGeom.depth);
}
@@ -1111,21 +1175,21 @@
if (bHor)
{
- md.pred[PRED_2NxnU].cu.initSubCU(parentCTU, cuGeom);
+ md.pred[PRED_2NxnU].cu.initSubCU(parentCTU, cuGeom, qp);
checkInter_rd5_6(md.pred[PRED_2NxnU], cuGeom, SIZE_2NxnU, bMergeOnly);
checkBestMode(md.pred[PRED_2NxnU], cuGeom.depth);
- md.pred[PRED_2NxnD].cu.initSubCU(parentCTU, cuGeom);
+ md.pred[PRED_2NxnD].cu.initSubCU(parentCTU, cuGeom, qp);
checkInter_rd5_6(md.pred[PRED_2NxnD], cuGeom, SIZE_2NxnD, bMergeOnly);
checkBestMode(md.pred[PRED_2NxnD], cuGeom.depth);
}
if (bVer)
{
- md.pred[PRED_nLx2N].cu.initSubCU(parentCTU, cuGeom);
+ md.pred[PRED_nLx2N].cu.initSubCU(parentCTU, cuGeom, qp);
checkInter_rd5_6(md.pred[PRED_nLx2N], cuGeom, SIZE_nLx2N, bMergeOnly);
checkBestMode(md.pred[PRED_nLx2N], cuGeom.depth);
- md.pred[PRED_nRx2N].cu.initSubCU(parentCTU, cuGeom);
+ md.pred[PRED_nRx2N].cu.initSubCU(parentCTU, cuGeom, qp);
checkInter_rd5_6(md.pred[PRED_nRx2N], cuGeom, SIZE_nRx2N, bMergeOnly);
checkBestMode(md.pred[PRED_nRx2N], cuGeom.depth);
}
@@ -1133,13 +1197,13 @@
if (m_slice->m_sliceType != B_SLICE || m_param->bIntraInBFrames)
{
- md.pred[PRED_INTRA].cu.initSubCU(parentCTU, cuGeom);
+ md.pred[PRED_INTRA].cu.initSubCU(parentCTU, cuGeom, qp);
checkIntra(md.pred[PRED_INTRA], cuGeom, SIZE_2Nx2N, NULL, NULL);
checkBestMode(md.pred[PRED_INTRA], depth);
if (cuGeom.log2CUSize == 3 && m_slice->m_sps->quadtreeTULog2MinSize < 3)
{
- md.pred[PRED_INTRA_NxN].cu.initSubCU(parentCTU, cuGeom);
+ md.pred[PRED_INTRA_NxN].cu.initSubCU(parentCTU, cuGeom, qp);
checkIntra(md.pred[PRED_INTRA_NxN], cuGeom, SIZE_NxN, NULL, NULL);
checkBestMode(md.pred[PRED_INTRA_NxN], depth);
}
@@ -1159,7 +1223,7 @@
Mode* splitPred = &md.pred[PRED_SPLIT];
splitPred->initCosts();
CUData* splitCU = &splitPred->cu;
- splitCU->initSubCU(parentCTU, cuGeom);
+ splitCU->initSubCU(parentCTU, cuGeom, qp);
uint32_t nextDepth = depth + 1;
ModeDepth& nd = m_modeDepth[nextDepth];
@@ -1173,7 +1237,7 @@
{
m_modeDepth[0].fencYuv.copyPartToYuv(nd.fencYuv, childGeom.absPartIdx);
m_rqt[nextDepth].cur.load(*nextContext);
- compressInterCU_rd5_6(parentCTU, childGeom, zOrder);
+ compressInterCU_rd5_6(parentCTU, childGeom, zOrder, qp, partIdx * 4 + subPartIdx);
// Save best CU and pred data for this sub CU
splitCU->copyPartFrom(nd.bestMode->cu, childGeom, subPartIdx);
@@ -1193,7 +1257,7 @@
else
updateModeCost(*splitPred);
- checkDQPForSplitPred(splitPred->cu, cuGeom);
+ checkDQPForSplitPred(*splitPred, cuGeom);
checkBestMode(*splitPred, depth);
}
@@ -1308,7 +1372,7 @@
md.bestMode->cu.setPUMv(1, candMvField[bestSadCand][1].mv, 0, 0);
md.bestMode->cu.setPURefIdx(0, (int8_t)candMvField[bestSadCand][0].refIdx, 0, 0);
md.bestMode->cu.setPURefIdx(1, (int8_t)candMvField[bestSadCand][1].refIdx, 0, 0);
- checkDQP(md.bestMode->cu, cuGeom);
+ checkDQP(*md.bestMode, cuGeom);
X265_CHECK(md.bestMode->ok(), "Merge mode not ok\n");
}
@@ -1440,7 +1504,7 @@
bestPred->cu.setPUMv(1, candMvField[bestCand][1].mv, 0, 0);
bestPred->cu.setPURefIdx(0, (int8_t)candMvField[bestCand][0].refIdx, 0, 0);
bestPred->cu.setPURefIdx(1, (int8_t)candMvField[bestCand][1].refIdx, 0, 0);
- checkDQP(bestPred->cu, cuGeom);
+ checkDQP(*bestPred, cuGeom);
X265_CHECK(bestPred->ok(), "merge mode is not ok");
}
diff -r 3ec6052eaf9c -r c20bac870095 source/encoder/analysis.h
--- a/source/encoder/analysis.h Fri Apr 17 14:02:26 2015 -0700
+++ b/source/encoder/analysis.h Sat Apr 18 11:16:08 2015 +0530
@@ -90,6 +90,7 @@
void processPmode(PMODE& pmode, Analysis& slave);
ModeDepth m_modeDepth[NUM_CU_DEPTH];
+ int* m_qp[NUM_CU_DEPTH];
bool m_bTryLossless;
bool m_bChromaSa8d;
@@ -109,12 +110,12 @@
uint32_t* m_reuseBestMergeCand;
/* full analysis for an I-slice CU */
- void compressIntraCU(const CUData& parentCTU, const CUGeom& cuGeom, uint32_t &zOrder);
+ void compressIntraCU(const CUData& parentCTU, const CUGeom& cuGeom, uint32_t &zOrder, int32_t qp, uint32_t partIdx);
/* full analysis for a P or B slice CU */
- void compressInterCU_dist(const CUData& parentCTU, const CUGeom& cuGeom);
- void compressInterCU_rd0_4(const CUData& parentCTU, const CUGeom& cuGeom);
- void compressInterCU_rd5_6(const CUData& parentCTU, const CUGeom& cuGeom, uint32_t &zOrder);
+ void compressInterCU_dist(const CUData& parentCTU, const CUGeom& cuGeom, int32_t qp, uint32_t partIdx);
+ void compressInterCU_rd0_4(const CUData& parentCTU, const CUGeom& cuGeom, int32_t qp, uint32_t partIdx);
+ void compressInterCU_rd5_6(const CUData& parentCTU, const CUGeom& cuGeom, uint32_t &zOrder, int32_t qp, uint32_t partIdx);
/* measure merge and skip */
void checkMerge2Nx2N_rd0_4(Mode& skip, Mode& merge, const CUGeom& cuGeom);
diff -r 3ec6052eaf9c -r c20bac870095 source/encoder/encoder.cpp
--- a/source/encoder/encoder.cpp Fri Apr 17 14:02:26 2015 -0700
+++ b/source/encoder/encoder.cpp Sat Apr 18 11:16:08 2015 +0530
@@ -1582,15 +1582,12 @@
bool bIsVbv = m_param->rc.vbvBufferSize > 0 && m_param->rc.vbvMaxBitrate > 0;
if (!m_param->bLossless && (m_param->rc.aqMode || bIsVbv))
- {
pps->bUseDQP = true;
- pps->maxCuDQPDepth = 0; /* TODO: make configurable? */
- }
else
- {
pps->bUseDQP = false;
- pps->maxCuDQPDepth = 0;
- }
+
+ pps->maxCuDQPDepth = g_log2Size[m_param->maxCUSize] - g_log2Size[m_param->rc.qgSize];
+ X265_CHECK(pps->maxCuDQPDepth <= 2, "max CU DQP depth cannot be greater than 2\n");
pps->chromaQpOffset[0] = m_param->cbQpOffset;
pps->chromaQpOffset[1] = m_param->crQpOffset;
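With this change the delta-QP depth follows directly from the CTU and quantization-group sizes:
maxCuDQPDepth = log2(maxCUSize) - log2(qgSize). A worked example under assumed settings (illustration
only):

    /* --ctu 64, qgSize 16  ->  maxCuDQPDepth = 6 - 4 = 2   (QP may change per 16x16 quant group)
       --ctu 64, qgSize 64  ->  maxCuDQPDepth = 6 - 6 = 0   (one QP per CTU, the previous behaviour)
       --ctu 32, qgSize 16  ->  maxCuDQPDepth = 5 - 4 = 1 */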
diff -r 3ec6052eaf9c -r c20bac870095 source/encoder/frameencoder.cpp
--- a/source/encoder/frameencoder.cpp Fri Apr 17 14:02:26 2015 -0700
+++ b/source/encoder/frameencoder.cpp Sat Apr 18 11:16:08 2015 +0530
@@ -852,9 +852,7 @@
if (m_param->rc.aqMode || bIsVbv)
{
int qp = calcQpForCu(cuAddr, curEncData.m_cuStat[cuAddr].baseQp);
- tld.analysis.setQP(*slice, qp);
qp = x265_clip3(QP_MIN, QP_MAX_SPEC, qp);
- ctu->setQPSubParts((int8_t)qp, 0, 0);
curEncData.m_rowStat[row].sumQpAq += qp;
}
else
diff -r 3ec6052eaf9c -r c20bac870095 source/encoder/search.cpp
--- a/source/encoder/search.cpp Fri Apr 17 14:02:26 2015 -0700
+++ b/source/encoder/search.cpp Sat Apr 18 11:16:08 2015 +0530
@@ -1185,7 +1185,7 @@
intraMode.psyEnergy = m_rdCost.psyCost(cuGeom.log2CUSize - 2, fencYuv->m_buf[0], fencYuv->m_size, intraMode.reconYuv.m_buf[0], intraMode.reconYuv.m_size);
}
updateModeCost(intraMode);
- checkDQP(cu, cuGeom);
+ checkDQP(intraMode, cuGeom);
}
/* Note that this function does not save the best intra prediction, it must
@@ -1400,7 +1400,7 @@
m_entropyCoder.store(intraMode.contexts);
updateModeCost(intraMode);
- checkDQP(intraMode.cu, cuGeom);
+ checkDQP(intraMode, cuGeom);
}
uint32_t Search::estIntraPredQT(Mode &intraMode, const CUGeom& cuGeom, const uint32_t depthRange[2], uint8_t* sharedModes)
@@ -2620,7 +2620,7 @@
interMode.coeffBits = coeffBits;
interMode.mvBits = bits - coeffBits;
updateModeCost(interMode);
- checkDQP(interMode.cu, cuGeom);
+ checkDQP(interMode, cuGeom);
}
void Search::residualTransformQuantInter(Mode& mode, const CUGeom& cuGeom, uint32_t absPartIdx, uint32_t tuDepth, const uint32_t depthRange[2])
@@ -3437,22 +3437,29 @@
}
}
-void Search::checkDQP(CUData& cu, const CUGeom& cuGeom)
+void Search::checkDQP(Mode& mode, const CUGeom& cuGeom)
{
+ CUData& cu = mode.cu;
if (cu.m_slice->m_pps->bUseDQP && cuGeom.depth <= cu.m_slice->m_pps->maxCuDQPDepth)
{
if (cu.getQtRootCbf(0))
{
- /* When analysing RDO with DQP bits, the entropy encoder should add the cost of DQP bits here
- * i.e Encode QP */
+ mode.contexts.resetBits();
+ mode.contexts.codeDeltaQP(cu, 0);
+ uint32_t bits = mode.contexts.getNumberOfWrittenBits();
+ mode.mvBits += bits;
+ mode.totalBits += bits;
+ updateModeCost(mode);
}
else
cu.setQPSubParts(cu.getRefQP(0), 0, cuGeom.depth);
}
}
-void Search::checkDQPForSplitPred(CUData& cu, const CUGeom& cuGeom)
+void Search::checkDQPForSplitPred(Mode& mode, const CUGeom& cuGeom)
{
+ CUData& cu = mode.cu;
+
if ((cuGeom.depth == cu.m_slice->m_pps->maxCuDQPDepth) && cu.m_slice->m_pps->bUseDQP)
{
bool hasResidual = false;
@@ -3467,10 +3474,17 @@
}
}
if (hasResidual)
- /* TODO: Encode QP, and recalculate RD cost of splitPred */
+ {
+ mode.contexts.resetBits();
+ mode.contexts.codeDeltaQP(cu, 0);
+ uint32_t bits = mode.contexts.getNumberOfWrittenBits();
+ mode.mvBits += bits;
+ mode.totalBits += bits;
+ updateModeCost(mode);
/* For all zero CBF sub-CUs, reset QP to RefQP (so that deltaQP is not signalled).
When the non-zero CBF sub-CU is found, stop */
cu.setQPSubCUs(cu.getRefQP(0), 0, cuGeom.depth);
+ }
else
/* No residual within this CU or subCU, so reset QP to RefQP */
cu.setQPSubParts(cu.getRefQP(0), 0, cuGeom.depth);
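checkDQP() and checkDQPForSplitPred() now resolve the earlier TODOs: when the CU (or any sub-CU) carries
residual, the bits needed to signal the delta QP are measured with the bit-counting entropy coder and
folded back into the mode's RD cost. The pattern, restated as a standalone sketch (the rdCost formula is
the assumed approximate form recomputed by updateModeCost()):

    mode.contexts.resetBits();                          // count bits rather than writing them
    mode.contexts.codeDeltaQP(cu, 0);                   // "encode" the delta-QP syntax element
    uint32_t dqpBits = mode.contexts.getNumberOfWrittenBits();
    mode.mvBits    += dqpBits;                          // dQP bits tracked with the non-coeff bits
    mode.totalBits += dqpBits;
    updateModeCost(mode);                               // approx. rdCost = distortion + lambda2 * totalBits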
diff -r 3ec6052eaf9c -r c20bac870095 source/encoder/search.h
--- a/source/encoder/search.h Fri Apr 17 14:02:26 2015 -0700
+++ b/source/encoder/search.h Sat Apr 18 11:16:08 2015 +0530
@@ -316,8 +316,8 @@
void getBestIntraModeChroma(Mode& intraMode, const CUGeom& cuGeom);
/* update CBF flags and QP values to be internally consistent */
- void checkDQP(CUData& cu, const CUGeom& cuGeom);
- void checkDQPForSplitPred(CUData& cu, const CUGeom& cuGeom);
+ void checkDQP(Mode& mode, const CUGeom& cuGeom);
+ void checkDQPForSplitPred(Mode& mode, const CUGeom& cuGeom);
class PME : public BondedTaskGroup
{