void Analysis::compressIntraCU(const CUData& parentCTU, const CUGeom& cuGeom, uint32_t& zOrder)
{
    uint32_t depth = cuGeom.depth;                                  // depth of this CU in the CTU quad-tree geometry
    ModeDepth& md = m_modeDepth[depth];
    md.bestMode = NULL;

    bool mightSplit = !(cuGeom.flags & CUGeom::LEAF);               // true if this is not a leaf node and may still be split
    bool mightNotSplit = !(cuGeom.flags & CUGeom::SPLIT_MANDATORY); // true if splitting is not mandatory at this depth

    if (m_param->analysisMode == X265_ANALYSIS_LOAD)
    {
        uint8_t* reuseDepth  = &m_reuseIntraDataCTU->depth[parentCTU.m_cuAddr * parentCTU.m_numPartitions];
        uint8_t* reuseModes  = &m_reuseIntraDataCTU->modes[parentCTU.m_cuAddr * parentCTU.m_numPartitions];
        char* reusePartSizes = &m_reuseIntraDataCTU->partSizes[parentCTU.m_cuAddr * parentCTU.m_numPartitions];

        if (mightNotSplit && depth == reuseDepth[zOrder] && zOrder == cuGeom.encodeIdx)
        {
            m_quant.setQPforQuant(parentCTU);

            PartSize size = (PartSize)reusePartSizes[zOrder];
            Mode& mode = size == SIZE_2Nx2N ? md.pred[PRED_INTRA] : md.pred[PRED_INTRA_NxN];
            mode.cu.initSubCU(parentCTU, cuGeom);
            checkIntra(mode, cuGeom, size, &reuseModes[zOrder]);
            checkBestMode(mode, depth);

            if (m_bTryLossless)
                tryLossless(cuGeom);

            if (mightSplit)
                addSplitFlagCost(*md.bestMode, cuGeom.depth);

            // increment zOrder offset to point to next best depth in sharedDepth buffer
            zOrder += g_depthInc[g_maxCUDepth - 1][reuseDepth[zOrder]];
            mightSplit = false;
        }
    }
    else if (mightNotSplit)
    {
        m_quant.setQPforQuant(parentCTU);

        md.pred[PRED_INTRA].cu.initSubCU(parentCTU, cuGeom);
        checkIntra(md.pred[PRED_INTRA], cuGeom, SIZE_2Nx2N, NULL);      // try intra 2Nx2N mode
        checkBestMode(md.pred[PRED_INTRA], depth);

        if (depth == g_maxCUDepth)
        {
            md.pred[PRED_INTRA_NxN].cu.initSubCU(parentCTU, cuGeom);
            checkIntra(md.pred[PRED_INTRA_NxN], cuGeom, SIZE_NxN, NULL); // try intra NxN mode at max depth
            checkBestMode(md.pred[PRED_INTRA_NxN], depth);
        }

        if (m_bTryLossless)
            tryLossless(cuGeom);

        if (mightSplit)
            addSplitFlagCost(*md.bestMode, cuGeom.depth);
    }

    if (mightSplit)
    {
        Mode* splitPred = &md.pred[PRED_SPLIT];
        splitPred->initCosts();
        CUData* splitCU = &splitPred->cu;
        splitCU->initSubCU(parentCTU, cuGeom);                          // split into four sub-CUs

        uint32_t nextDepth = depth + 1;
        ModeDepth& nd = m_modeDepth[nextDepth];
        invalidateContexts(nextDepth);
        Entropy* nextContext = &m_rqt[depth].cur;

        for (uint32_t subPartIdx = 0; subPartIdx < 4; subPartIdx++)
        {
            const CUGeom& childGeom = *(&cuGeom + cuGeom.childOffset + subPartIdx);
            if (childGeom.flags & CUGeom::PRESENT)
            {
                m_modeDepth[0].fencYuv.copyPartToYuv(nd.fencYuv, childGeom.encodeIdx);
                m_rqt[nextDepth].cur.load(*nextContext);
                compressIntraCU(parentCTU, childGeom, zOrder);          // recurse into the sub-CU

                // Save best CU and pred data for this sub CU
                splitCU->copyPartFrom(nd.bestMode->cu, childGeom, subPartIdx);
                splitPred->addSubCosts(*nd.bestMode);
                nd.bestMode->reconYuv.copyToPartYuv(splitPred->reconYuv, childGeom.numPartitions * subPartIdx);
                nextContext = &nd.bestMode->contexts;
            }
            else
            {
                /* record the depth of this non-present sub-CU */
                splitCU->setEmptyPart(childGeom, subPartIdx);
                zOrder += g_depthInc[g_maxCUDepth - 1][nextDepth];
            }
        }
        nextContext->store(splitPred->contexts);
        if (mightNotSplit)
            addSplitFlagCost(*splitPred, cuGeom.depth);
        else
            updateModeCost(*splitPred);
        checkBestMode(*splitPred, depth);
    }

    checkDQP(md.bestMode->cu, cuGeom);

    /* Copy best data to encData CTU and recon */
    md.bestMode->cu.copyToPic(depth);
    if (md.bestMode != &md.pred[PRED_SPLIT])
        md.bestMode->reconYuv.copyToPicYuv(*m_frame->m_reconPic, parentCTU.m_cuAddr, cuGeom.encodeIdx);
}
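
The control flow above is the standard rate-distortion quad-tree pattern: evaluate the current CU as a whole (the mightNotSplit branch), then recursively evaluate the four sub-CUs (the mightSplit branch), and keep whichever candidate costs less via checkBestMode(). The following is a minimal, self-contained sketch of that recursion only; compressBlock, codeAsWhole, and the toy cost model are hypothetical illustrations, not x265's actual cost functions.

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Hypothetical cost model: coding a block whole costs its area,
// and each split decision adds a small signalling overhead.
static uint64_t codeAsWhole(uint32_t size) { return (uint64_t)size * size; }
static const uint64_t SPLIT_OVERHEAD = 8;

// Recursively choose between "code this block whole" and "split into four
// sub-blocks", returning the cheaper cost. This mirrors the shape of
// compressIntraCU: evaluate the non-split candidate, then the split
// candidate, then keep the best.
static uint64_t compressBlock(uint32_t size, uint32_t depth, uint32_t maxDepth)
{
    uint64_t bestCost = codeAsWhole(size);        // the "mightNotSplit" candidate

    if (depth < maxDepth)                         // the "mightSplit" candidate
    {
        uint64_t splitCost = SPLIT_OVERHEAD;
        for (int subPartIdx = 0; subPartIdx < 4; subPartIdx++)
            splitCost += compressBlock(size / 2, depth + 1, maxDepth);
        bestCost = std::min(bestCost, splitCost); // analogue of checkBestMode()
    }
    return bestCost;
}

int main()
{
    // A 64x64 "CTU" searched down to depth 3 (8x8 blocks), like a typical CTU quad-tree.
    printf("best cost: %llu\n", (unsigned long long)compressBlock(64, 0, 3));
    return 0;
}

In the real function the decision is driven by full RD cost (distortion plus estimated bits from the entropy contexts saved and restored around each child), which is why compressIntraCU also loads, stores, and propagates Entropy contexts across the four sub-CUs.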