[x265] [PATCH] analysis: re-order RD 5/6 analysis to do splits before ME or intra
ashok at multicorewareinc.com
ashok at multicorewareinc.com
Tue Jun 23 17:15:05 CEST 2015
# HG changeset patch
# User Ashok Kumar Mishra<ashok at multicorewareinc.com>
# Date 1432215988 -19800
# Thu May 21 19:16:28 2015 +0530
# Node ID 0ea7f7c9544ffdf67c35e234dcf6ca1abbf83c2c
# Parent dfdf378a3968a15a1465a3aa3098e507fb4f10e5
analysis: re-order RD 5/6 analysis to do splits before ME or intra
This commit changes outputs because splits used to be avoided when an inter or
intra mode was chosen without residual coding. This recursion early-out is no
longer possible. Only merge without residual (aka skip) can abort recursion.
This commit changes the order of analysis such that the four split blocks are
analyzed prior to attempting any ME or intra modes. In future commits we will use
the knowledge learned during split analysis to avoid unlikely work at the
current depth (reducing motion references, avoiding unlikely intra, rectangular,
asymmetric, and lossless modes).
diff -r dfdf378a3968 -r 0ea7f7c9544f source/encoder/analysis.cpp
--- a/source/encoder/analysis.cpp Mon Jun 22 17:44:14 2015 -0500
+++ b/source/encoder/analysis.cpp Thu May 21 19:16:28 2015 +0530
@@ -1170,14 +1170,72 @@
}
}
+ bool foundSkip = false;
+ /* Step 1. Evaluate Merge/Skip candidates for likely early-outs */
if (mightNotSplit)
{
md.pred[PRED_SKIP].cu.initSubCU(parentCTU, cuGeom, qp);
md.pred[PRED_MERGE].cu.initSubCU(parentCTU, cuGeom, qp);
checkMerge2Nx2N_rd5_6(md.pred[PRED_SKIP], md.pred[PRED_MERGE], cuGeom, false);
- bool earlySkip = m_param->bEnableEarlySkip && md.bestMode && !md.bestMode->cu.getQtRootCbf(0);
+ foundSkip = md.bestMode && !md.bestMode->cu.getQtRootCbf(0);
+ }
- if (!earlySkip)
+ // estimate split cost
+ /* Step 2. Evaluate each of the 4 split sub-blocks in series */
+ if (mightSplit && !foundSkip)
+ {
+ Mode* splitPred = &md.pred[PRED_SPLIT];
+ splitPred->initCosts();
+ CUData* splitCU = &splitPred->cu;
+ splitCU->initSubCU(parentCTU, cuGeom, qp);
+
+ uint32_t nextDepth = depth + 1;
+ ModeDepth& nd = m_modeDepth[nextDepth];
+ invalidateContexts(nextDepth);
+ Entropy* nextContext = &m_rqt[depth].cur;
+ int nextQP = qp;
+
+ for (uint32_t subPartIdx = 0; subPartIdx < 4; subPartIdx++)
+ {
+ const CUGeom& childGeom = *(&cuGeom + cuGeom.childOffset + subPartIdx);
+ if (childGeom.flags & CUGeom::PRESENT)
+ {
+ m_modeDepth[0].fencYuv.copyPartToYuv(nd.fencYuv, childGeom.absPartIdx);
+ m_rqt[nextDepth].cur.load(*nextContext);
+
+ if (m_slice->m_pps->bUseDQP && nextDepth <= m_slice->m_pps->maxCuDQPDepth)
+ nextQP = setLambdaFromQP(parentCTU, calculateQpforCuSize(parentCTU, childGeom));
+
+ compressInterCU_rd5_6(parentCTU, childGeom, zOrder, nextQP);
+
+ // Save best CU and pred data for this sub CU
+ splitCU->copyPartFrom(nd.bestMode->cu, childGeom, subPartIdx);
+ splitPred->addSubCosts(*nd.bestMode);
+ nd.bestMode->reconYuv.copyToPartYuv(splitPred->reconYuv, childGeom.numPartitions * subPartIdx);
+ nextContext = &nd.bestMode->contexts;
+ }
+ else
+ {
+ splitCU->setEmptyPart(childGeom, subPartIdx);
+ zOrder += g_depthInc[g_maxCUDepth - 1][nextDepth];
+ }
+ }
+ nextContext->store(splitPred->contexts);
+ if (mightNotSplit)
+ addSplitFlagCost(*splitPred, cuGeom.depth);
+ else
+ updateModeCost(*splitPred);
+
+ checkDQPForSplitPred(*splitPred, cuGeom);
+ }
+
+ /* Step 3. Evaluate ME (2Nx2N, rect, amp) and intra modes at current depth */
+ if (mightNotSplit)
+ {
+ if (m_slice->m_pps->bUseDQP && depth <= m_slice->m_pps->maxCuDQPDepth && m_slice->m_pps->maxCuDQPDepth != 0)
+ setLambdaFromQP(parentCTU, qp);
+
+ if (!(foundSkip && m_param->bEnableEarlySkip))
{
md.pred[PRED_2Nx2N].cu.initSubCU(parentCTU, cuGeom, qp);
checkInter_rd5_6(md.pred[PRED_2Nx2N], cuGeom, SIZE_2Nx2N);
@@ -1263,59 +1321,13 @@
addSplitFlagCost(*md.bestMode, cuGeom.depth);
}
- // estimate split cost
- if (mightSplit && (!md.bestMode || !md.bestMode->cu.isSkipped(0)))
- {
- Mode* splitPred = &md.pred[PRED_SPLIT];
- splitPred->initCosts();
- CUData* splitCU = &splitPred->cu;
- splitCU->initSubCU(parentCTU, cuGeom, qp);
-
- uint32_t nextDepth = depth + 1;
- ModeDepth& nd = m_modeDepth[nextDepth];
- invalidateContexts(nextDepth);
- Entropy* nextContext = &m_rqt[depth].cur;
- int nextQP = qp;
-
- for (uint32_t subPartIdx = 0; subPartIdx < 4; subPartIdx++)
- {
- const CUGeom& childGeom = *(&cuGeom + cuGeom.childOffset + subPartIdx);
- if (childGeom.flags & CUGeom::PRESENT)
- {
- m_modeDepth[0].fencYuv.copyPartToYuv(nd.fencYuv, childGeom.absPartIdx);
- m_rqt[nextDepth].cur.load(*nextContext);
-
- if (m_slice->m_pps->bUseDQP && nextDepth <= m_slice->m_pps->maxCuDQPDepth)
- nextQP = setLambdaFromQP(parentCTU, calculateQpforCuSize(parentCTU, childGeom));
-
- compressInterCU_rd5_6(parentCTU, childGeom, zOrder, nextQP);
-
- // Save best CU and pred data for this sub CU
- splitCU->copyPartFrom(nd.bestMode->cu, childGeom, subPartIdx);
- splitPred->addSubCosts(*nd.bestMode);
- nd.bestMode->reconYuv.copyToPartYuv(splitPred->reconYuv, childGeom.numPartitions * subPartIdx);
- nextContext = &nd.bestMode->contexts;
- }
- else
- {
- splitCU->setEmptyPart(childGeom, subPartIdx);
- zOrder += g_depthInc[g_maxCUDepth - 1][nextDepth];
- }
- }
- nextContext->store(splitPred->contexts);
- if (mightNotSplit)
- addSplitFlagCost(*splitPred, cuGeom.depth);
- else
- updateModeCost(*splitPred);
-
- checkDQPForSplitPred(*splitPred, cuGeom);
- checkBestMode(*splitPred, depth);
- }
+ /* compare split RD cost against best cost */
+ if (mightSplit && !foundSkip)
+ checkBestMode(md.pred[PRED_SPLIT], depth);
/* Copy best data to encData CTU and recon */
md.bestMode->cu.copyToPic(depth);
- if (md.bestMode != &md.pred[PRED_SPLIT])
- md.bestMode->reconYuv.copyToPicYuv(*m_frame->m_reconPic, parentCTU.m_cuAddr, cuGeom.absPartIdx);
+ md.bestMode->reconYuv.copyToPicYuv(*m_frame->m_reconPic, parentCTU.m_cuAddr, cuGeom.absPartIdx);
}
/* sets md.bestMode if a valid merge candidate is found, else leaves it NULL */
More information about the x265-devel
mailing list