H.265/HEVC(High Efficiency Video Coding)のリファレンスソフトウェア(HM)のTAppEncTop::encode関数について学んでいきます。前提として入力画像はQCIF(176×144)、YCbCr 4:2:0、configファイルはcfg/misc以下のencoder_randomaccess_main_GOP8.cfgを用いるものとして話を進めます。
TAppEncTop::encode()
TAppEncTop::encode()は大きく前半と後半に分けられます。前半は主にエンコードを実行するための各種パラメータの設定や、画像のバッファ領域確保を行っています。後半はフレーム単位で画像を読み出し、m_cTEncTop.encode()でエンコードを実行します。この処理をm_framesToBeEncodedに設定されたフレーム数に達するまで、whileループで繰り返します。
/// Top-level encode driver: sets up buffers and library state, then reads and
/// encodes the input one frame at a time until m_framesToBeEncoded is reached.
Void TAppEncTop::encode()
{
// Open the output bitstream file for writing
fstream bitstreamFile(m_bitstreamFileName.c_str(), fstream::binary | fstream::out);
if (!bitstreamFile)
{
fprintf(stderr, "\nfailed to open bitstream file `%s' for writing\n", m_bitstreamFileName.c_str());
exit(EXIT_FAILURE);
}
// Picture buffer objects (TComPicYuv allocates the actual YUV storage)
TComPicYuv* pcPicYuvOrg = new TComPicYuv;
TComPicYuv* pcPicYuvRec = NULL;
// JVET_X0048_X0103_FILM_GRAIN defaults to 1 (defined in source/Lib/TLibCommon/TypeDef.h)
#if JVET_X0048_X0103_FILM_GRAIN
// NOTE: despite the m_ prefix this is a local variable, not a member (HM quirk)
TComPicYuv* m_filteredOrgPicForFG;
// In this walkthrough m_fgcSEIAnalysisEnabled=false (SEIFGCAnalysisEnable=0 in the config), so the else branch runs
if (m_fgcSEIAnalysisEnabled && m_fgcSEIExternalDenoised.empty())
{
m_filteredOrgPicForFG = new TComPicYuv;
m_filteredOrgPicForFG->create(m_sourceWidth, m_sourceHeight, m_chromaFormatIDC, m_uiMaxCUWidth, m_uiMaxCUHeight, m_uiMaxTotalCUDepth, true);
}
else
{
m_filteredOrgPicForFG = NULL;
}
#endif
// initialize internal class & member variables
// (initializes the internals of the TEncTop member m_cTEncTop)
xInitLibCfg();
xCreateLib();
xInitLib(m_isField);
printChromaFormat();
// main encoder loop
Int iNumEncoded = 0;
Bool bEos = false;
// With this config, ipCSC = snrCSC = IPCOLOURSPACE_UNCHANGED (0)
const InputColourSpaceConversion ipCSC = m_inputColourSpaceConvert;
const InputColourSpaceConversion snrCSC = (!m_snrInternalColourSpace) ? m_inputColourSpaceConvert : IPCOLOURSPACE_UNCHANGED;
list<AccessUnit> outputAccessUnits; ///< list of access units to write out. is populated by the encoding process
TComPicYuv cPicYuvTrueOrg;
// allocate original YUV buffer (field coding uses the per-field height m_sourceHeightOrg)
if( m_isField )
{
pcPicYuvOrg->create ( m_sourceWidth, m_sourceHeightOrg, m_chromaFormatIDC, m_uiMaxCUWidth, m_uiMaxCUHeight, m_uiMaxTotalCUDepth, true );
cPicYuvTrueOrg.create(m_sourceWidth, m_sourceHeightOrg, m_chromaFormatIDC, m_uiMaxCUWidth, m_uiMaxCUHeight, m_uiMaxTotalCUDepth, true);
}
else
{
pcPicYuvOrg->create ( m_sourceWidth, m_sourceHeight, m_chromaFormatIDC, m_uiMaxCUWidth, m_uiMaxCUHeight, m_uiMaxTotalCUDepth, true );
cPicYuvTrueOrg.create(m_sourceWidth, m_sourceHeight, m_chromaFormatIDC, m_uiMaxCUWidth, m_uiMaxCUHeight, m_uiMaxTotalCUDepth, true );
}
// EXTENSION_360_VIDEO defaults to 0
#if EXTENSION_360_VIDEO
TExt360AppEncTop ext360(*this, m_cTEncTop.getGOPEncoder()->getExt360Data(), *(m_cTEncTop.getGOPEncoder()), *pcPicYuvOrg);
#endif
TEncTemporalFilter temporalFilter;
// JVET_Y0077_BIM defaults to 1
#if JVET_Y0077_BIM
// Here m_gopBasedTemporalFilterEnabled=false (TemporalFilter=0 in the config) and
// m_bimEnabled=false (BIM=0 in the config), so the if-body is not executed
if ( m_gopBasedTemporalFilterEnabled || m_bimEnabled )
#else
if (m_gopBasedTemporalFilterEnabled)
#endif
{
temporalFilter.init(m_FrameSkip, m_inputBitDepth, m_MSBExtendedBitDepth, m_internalBitDepth, m_sourceWidth, m_sourceHeight,
m_sourcePadding, m_framesToBeEncoded, m_bClipInputVideoToRec709Range, m_inputFileName, m_chromaFormatIDC,
m_inputColourSpaceConvert, m_iQP, m_iGOPSize, m_gopBasedTemporalFilterStrengths,
m_gopBasedTemporalFilterPastRefs, m_gopBasedTemporalFilterFutureRefs,
#if !JVET_Y0077_BIM
m_firstValidFrame, m_lastValidFrame);
#else
m_firstValidFrame, m_lastValidFrame,
m_gopBasedTemporalFilterEnabled, m_cTEncTop.getAdaptQPmap(), m_bimEnabled);
#endif
}
// JVET_X0048_X0103_FILM_GRAIN defaults to 1
#if JVET_X0048_X0103_FILM_GRAIN
TEncTemporalFilter m_temporalFilterForFG;
// Here m_fgcSEIAnalysisEnabled=false, so the if-body is not executed
if ( m_fgcSEIAnalysisEnabled && m_fgcSEIExternalDenoised.empty() )
{
int filteredFrame = 0;
if ( m_iIntraPeriod < 1 )
filteredFrame = 2 * m_iFrameRate;
else
filteredFrame = m_iIntraPeriod;
map<int, double> filteredFramesAndStrengths = { { filteredFrame, 1.5 } }; // TODO: adjust MCTF and MCTF strenght
m_temporalFilterForFG.init(m_FrameSkip, m_inputBitDepth, m_MSBExtendedBitDepth, m_internalBitDepth, m_sourceWidth, m_sourceHeight,
m_sourcePadding, m_framesToBeEncoded, m_bClipInputVideoToRec709Range, m_inputFileName, m_chromaFormatIDC,
m_inputColourSpaceConvert, m_iQP, m_iGOPSize, filteredFramesAndStrengths,
m_gopBasedTemporalFilterPastRefs, m_gopBasedTemporalFilterFutureRefs,
#if !JVET_Y0077_BIM
m_firstValidFrame, m_lastValidFrame);
#else
m_firstValidFrame, m_lastValidFrame,
m_gopBasedTemporalFilterEnabled, m_cTEncTop.getAdaptQPmap(), m_bimEnabled);
#endif
}
#endif
// Per-frame encoding loop starts here
while ( !bEos )
{
// get buffers
// xGetBuffer: while m_cListPicYuvRec holds fewer than m_iGOPSize (8) entries a new
// pcPicYuvRec is allocated and appended; once the list is full, the head element is
// moved to the tail, so the list behaves like a ring buffer
xGetBuffer(pcPicYuvRec);
// read input YUV file
// EXTENSION_360_VIDEO defaults to 0
#if EXTENSION_360_VIDEO
if (ext360.isEnabled())
{
ext360.read(m_cTVideoIOYuvInputFile, *pcPicYuvOrg, cPicYuvTrueOrg, ipCSC);
}
else
{
m_cTVideoIOYuvInputFile.read( pcPicYuvOrg, &cPicYuvTrueOrg, ipCSC, m_sourcePadding, m_InputChromaFormatIDC, m_bClipInputVideoToRec709Range );
}
#else
// Read one frame from the input file
m_cTVideoIOYuvInputFile.read( pcPicYuvOrg, &cPicYuvTrueOrg, ipCSC, m_sourcePadding, m_InputChromaFormatIDC, m_bClipInputVideoToRec709Range );
#endif
// Skipped here: m_fgcSEIAnalysisEnabled=false
#if JVET_X0048_X0103_FILM_GRAIN
if (m_fgcSEIAnalysisEnabled && m_fgcSEIExternalDenoised.empty())
{
pcPicYuvOrg->copyToPic(m_filteredOrgPicForFG);
m_temporalFilterForFG.filter(m_filteredOrgPicForFG, m_iFrameRcvd);
}
#endif
// Skipped here: m_gopBasedTemporalFilterEnabled=false
#if JVET_Y0077_BIM
if ( m_gopBasedTemporalFilterEnabled || m_bimEnabled )
#else
if (m_gopBasedTemporalFilterEnabled)
#endif
{
temporalFilter.filter(pcPicYuvOrg, m_iFrameRcvd);
}
// increase number of received frames
m_iFrameRcvd++;
// Set bEos=true once the requested number of frames has been received
// (field coding counts frame pairs, hence the >> 1)
bEos = (m_isField && (m_iFrameRcvd == (m_framesToBeEncoded >> 1) )) || ( !m_isField && (m_iFrameRcvd == m_framesToBeEncoded) );
Bool flush = 0;
// if end of file (which is only detected on a read failure) flush the encoder of any queued pictures
if (m_cTVideoIOYuvInputFile.isEof())
{
flush = true;
bEos = true;
m_iFrameRcvd--;
m_cTEncTop.setFramesToBeEncoded(m_iFrameRcvd);
}
// call encoding function for one frame
if ( m_isField )
{
m_cTEncTop.encode( bEos, flush ? 0 : pcPicYuvOrg, flush ? 0 : &cPicYuvTrueOrg, ipCSC, snrCSC, m_cListPicYuvRec, outputAccessUnits, iNumEncoded, m_isTopFieldFirst );
}
else
{
// JVET_X0048_X0103_FILM_GRAIN defaults to 1, so the first encode() overload below is the one invoked
#if JVET_X0048_X0103_FILM_GRAIN
m_cTEncTop.encode( bEos, flush ? 0 : pcPicYuvOrg, flush ? 0 : &cPicYuvTrueOrg, flush ? 0 : m_filteredOrgPicForFG, ipCSC, snrCSC, m_cListPicYuvRec, outputAccessUnits, iNumEncoded);
#else
m_cTEncTop.encode( bEos, flush ? 0 : pcPicYuvOrg, flush ? 0 : &cPicYuvTrueOrg, ipCSC, snrCSC, m_cListPicYuvRec, outputAccessUnits, iNumEncoded );
#endif
}
// SHUTTER_INTERVAL_SEI_PROCESSING defaults to 1
#if SHUTTER_INTERVAL_SEI_PROCESSING
// Skipped here: m_ShutterFilterEnable=false
if (m_ShutterFilterEnable && !m_shutterIntervalPreFileName.empty())
{
m_cTVideoIOYuvSIIPreFile.write(pcPicYuvOrg, ipCSC, m_confWinLeft, m_confWinRight, m_confWinTop, m_confWinBottom,
NUM_CHROMA_FORMAT, m_bClipOutputVideoToRec709Range);
}
#endif
// write bitstream to file if necessary (iNumEncoded > 0 only after a GOP was compressed)
if ( iNumEncoded > 0 )
{
xWriteOutput(bitstreamFile, iNumEncoded, outputAccessUnits);
outputAccessUnits.clear();
}
// temporally skip frames
if( m_temporalSubsampleRatio > 1 )
{
m_cTVideoIOYuvInputFile.skipFrames(m_temporalSubsampleRatio-1, m_inputFileWidth, m_inputFileHeight, m_InputChromaFormatIDC);
}
} // while ( !bEos )
m_cTEncTop.printSummary(m_isField);
// delete original YUV buffer
pcPicYuvOrg->destroy();
delete pcPicYuvOrg;
pcPicYuvOrg = NULL;
#if JVET_X0048_X0103_FILM_GRAIN
if (m_fgcSEIAnalysisEnabled && m_fgcSEIExternalDenoised.empty())
{
m_filteredOrgPicForFG->destroy();
delete m_filteredOrgPicForFG;
m_filteredOrgPicForFG = NULL;
}
#endif
// delete used buffers in encoder class
m_cTEncTop.deletePicBuffer();
cPicYuvTrueOrg.destroy();
// delete buffers & classes
xDeleteBuffer();
xDestroyLib();
printRateSummary();
return;
}
TAppEncTop::xInitLibCfg()
TAppEncTop::xInitLibCfg()は500行以上の長い関数ですが、VPS(Video Parameter Set)の一部を設定し、その他のTAppEncTopクラスの各種パラメータ値をTEncTopクラスの変数m_cTEncTopに渡しているだけです。
/// Copies the command-line/config parameters held by TAppEncTop into the
/// encoder library object m_cTEncTop, and fills in part of the VPS.
Void TAppEncTop::xInitLibCfg()
{
TComVPS vps;
vps.setMaxTLayers ( m_maxTempLayer );
if (m_maxTempLayer == 1)
{
// A single temporal layer implies temporal nesting
vps.setTemporalNestingFlag(true);
}
vps.setMaxLayers ( 1 );
// Per-temporal-layer reorder / DPB sizes
for(Int i = 0; i < MAX_TLAYER; i++)
{
vps.setNumReorderPics ( m_numReorderPics[i], i );
vps.setMaxDecPicBuffering ( m_maxDecPicBuffering[i], i );
}
m_cTEncTop.setVPS(&vps);
m_cTEncTop.setProfile ( m_profile);
m_cTEncTop.setLevel ( m_levelTier, m_level);
m_cTEncTop.setProgressiveSourceFlag ( m_progressiveSourceFlag);
m_cTEncTop.setInterlacedSourceFlag ( m_interlacedSourceFlag);
m_cTEncTop.setNonPackedConstraintFlag ( m_nonPackedConstraintFlag);
m_cTEncTop.setFrameOnlyConstraintFlag ( m_frameOnlyConstraintFlag);
m_cTEncTop.setBitDepthConstraintValue ( m_bitDepthConstraint );
m_cTEncTop.setChromaFormatConstraintValue ( m_chromaFormatConstraint );
m_cTEncTop.setIntraConstraintFlag ( m_intraConstraintFlag );
m_cTEncTop.setOnePictureOnlyConstraintFlag ( m_onePictureOnlyConstraintFlag );
m_cTEncTop.setLowerBitRateConstraintFlag ( m_lowerBitRateConstraintFlag );
// (the article elides several hundred similar setter calls here)
...
m_cTEncTop.setSummaryOutFilename ( m_summaryOutFilename );
m_cTEncTop.setSummaryPicFilenameBase ( m_summaryPicFilenameBase );
m_cTEncTop.setSummaryVerboseness ( m_summaryVerboseness );
#if JCTVC_AD0021_SEI_MANIFEST
m_cTEncTop.setSEIManifestSEIEnabled(m_SEIManifestSEIEnabled);
#endif
#if JCTVC_AD0021_SEI_PREFIX_INDICATION
m_cTEncTop.setSEIPrefixIndicationSEIEnabled(m_SEIPrefixIndicationSEIEnabled);
#endif
}
TAppEncTop::xCreateLib()
TAppEncTop::xCreateLib()ではTVideoIOYuvクラスの変数m_cTVideoIOYuvInputFileで入力ファイルをOpenし(実際のフレーム読み込みはencode()のループ内で行われます)、m_cTEncTop.create()を実行します。
/// Opens the video I/O files and creates the encoder library's internal objects.
Void TAppEncTop::xCreateLib()
{
// Video I/O
// Open the input file; the handle is kept in TVideoIOYuv's internal member m_cHandle
m_cTVideoIOYuvInputFile.open( m_inputFileName, false, m_inputBitDepth, m_MSBExtendedBitDepth, m_internalBitDepth ); // read mode
m_cTVideoIOYuvInputFile.skipFrames(m_FrameSkip, m_inputFileWidth, m_inputFileHeight, m_InputChromaFormatIDC);
if (!m_reconFileName.empty())
{
// Open the reconstructed-picture output file
m_cTVideoIOYuvReconFile.open(m_reconFileName, true, m_outputBitDepth, m_outputBitDepth, m_internalBitDepth); // write mode
}
#if SHUTTER_INTERVAL_SEI_PROCESSING
if (m_ShutterFilterEnable && !m_shutterIntervalPreFileName.empty())
{
m_cTVideoIOYuvSIIPreFile.open(m_shutterIntervalPreFileName, true, m_outputBitDepth, m_outputBitDepth, m_internalBitDepth); // write mode
}
#endif
// Neo Decoder
m_cTEncTop.create();
}
TVideoIOYuvクラスについては以下で解析しています。
TEncTop::create()
TEncTop::create()ではinitROM()でグローバル変数を初期化するとともに、TEncTopクラスで利用する内部クラスの初期化を行います。
/// Allocates the encoder's processing-unit classes and the triple-indexed
/// RD SBAC coder / CABAC bin coder arrays [depth][context-index].
Void TEncTop::create ()
{
// initialize global variables
initROM();
// create processing unit classes
m_cGOPEncoder. create( );
m_cSliceEncoder. create( getSourceWidth(), getSourceHeight(), m_chromaFormatIDC, m_maxCUWidth, m_maxCUHeight, m_maxTotalCUDepth );
m_cCuEncoder. create( m_maxTotalCUDepth, m_maxCUWidth, m_maxCUHeight, m_chromaFormatIDC );
// SAO structures are only needed when SAO is enabled
if (m_bUseSAO)
{
m_cEncSAO.create( getSourceWidth(), getSourceHeight(), m_chromaFormatIDC, m_maxCUWidth, m_maxCUHeight, m_maxTotalCUDepth, m_log2SaoOffsetScale[CHANNEL_TYPE_LUMA], m_log2SaoOffsetScale[CHANNEL_TYPE_CHROMA] );
m_cEncSAO.createEncData(getSaoCtuBoundary());
}
#if ADAPTIVE_QP_SELECTION
if (m_bUseAdaptQpSelect)
{
m_cTrQuant.initSliceQpDelta();
}
#endif
m_cLoopFilter.create( m_maxTotalCUDepth );
// Rate-control state only exists when rate control is enabled
if ( m_RCEnableRateControl )
{
#if JVET_Y0105_SW_AND_QDF
m_cRateCtrl.init( m_framesToBeEncoded, m_RCTargetBitrate, (Int)( (Double)m_iFrameRate/m_temporalSubsampleRatio + 0.5), m_iGOPSize, m_uiIntraPeriod, m_iSourceWidth, m_iSourceHeight,
m_maxCUWidth, m_maxCUHeight,m_RCKeepHierarchicalBit, m_RCUseLCUSeparateModel, m_GOPList );
#else
m_cRateCtrl.init( m_framesToBeEncoded, m_RCTargetBitrate, (Int)( (Double)m_iFrameRate/m_temporalSubsampleRatio + 0.5), m_iGOPSize, m_iSourceWidth, m_iSourceHeight,
m_maxCUWidth, m_maxCUHeight,m_RCKeepHierarchicalBit, m_RCUseLCUSeparateModel, m_GOPList );
#endif
}
// One RD SBAC coder and one bin coder per (CU depth, context-index) pair;
// FAST_BIT_EST selects a counter-based bin coder for faster bit estimation
m_pppcRDSbacCoder = new TEncSbac** [m_maxTotalCUDepth+1];
#if FAST_BIT_EST
m_pppcBinCoderCABAC = new TEncBinCABACCounter** [m_maxTotalCUDepth+1];
#else
m_pppcBinCoderCABAC = new TEncBinCABAC** [m_maxTotalCUDepth+1];
#endif
for ( Int iDepth = 0; iDepth < m_maxTotalCUDepth+1; iDepth++ )
{
m_pppcRDSbacCoder[iDepth] = new TEncSbac* [CI_NUM];
#if FAST_BIT_EST
m_pppcBinCoderCABAC[iDepth] = new TEncBinCABACCounter* [CI_NUM];
#else
m_pppcBinCoderCABAC[iDepth] = new TEncBinCABAC* [CI_NUM];
#endif
for (Int iCIIdx = 0; iCIIdx < CI_NUM; iCIIdx ++ )
{
m_pppcRDSbacCoder[iDepth][iCIIdx] = new TEncSbac;
#if FAST_BIT_EST
m_pppcBinCoderCABAC [iDepth][iCIIdx] = new TEncBinCABACCounter;
#else
m_pppcBinCoderCABAC [iDepth][iCIIdx] = new TEncBinCABAC;
#endif
// Each SBAC coder drives its paired bin coder
m_pppcRDSbacCoder [iDepth][iCIIdx]->init( m_pppcBinCoderCABAC [iDepth][iCIIdx] );
}
}
}
TAppEncTop::xInitLib()
TAppEncTop::xInitLib()はTEncTop::init()を呼んでいるだけです。
/// Thin wrapper that forwards initialization to the encoder library (TEncTop::init).
Void TAppEncTop::xInitLib(Bool isFieldCoding)
{
m_cTEncTop.init(isFieldCoding);
}
TEncTop::init()
TEncTop::init()はSPS、VPS、PPS、RPS等のパラメータを初期化するとともに、TEncTopクラスで利用する内部クラスの初期化を行います。
/// Initializes the parameter sets (SPS/VPS/PPS/RPS, scaling lists) and the
/// encoder's processing-unit classes. Ordering matters: the SPS is filled
/// first because VPS/PPS/RPS initialization reads from it.
Void TEncTop::init(Bool isFieldCoding)
{
TComSPS &sps0=*(m_spsMap.allocatePS(0)); // NOTE: implementations that use more than 1 SPS need to be aware of activation issues.
TComPPS &pps0=*(m_ppsMap.allocatePS(0));
// initialize SPS
xInitSPS(sps0);
xInitVPS(m_cVPS, sps0);
if (m_RCCpbSaturationEnabled)
{
// Rate control needs the HRD parameters from the SPS VUI
m_cRateCtrl.initHrdParam(sps0.getVuiParameters()->getHrdParameters(), m_iFrameRate, m_RCInitialCpbFullness);
}
m_cRdCost.setCostMode(m_costMode);
// initialize PPS
xInitPPS(pps0, sps0);
xInitRPS(sps0, isFieldCoding);
xInitScalingLists(sps0, pps0);
// A second PPS (ID 1) is allocated only for WCG chroma-QP control
if (m_wcgChromaQpControl.isEnabled())
{
TComPPS &pps1=*(m_ppsMap.allocatePS(1));
xInitPPS(pps1, sps0);
xInitScalingLists(sps0, pps1);
}
// initialize processing unit classes
m_cGOPEncoder. init( this );
m_cSliceEncoder.init( this );
m_cCuEncoder. init( this );
m_cCuEncoder.setSliceEncoder(&m_cSliceEncoder);
// initialize transform & quantization class
m_pcCavlcCoder = getCavlcCoder();
m_cTrQuant.init( 1 << m_uiQuadtreeTULog2MaxSize,
m_useRDOQ,
m_useRDOQTS,
m_useSelectiveRDOQ,
true
,m_useTransformSkipFast
#if ADAPTIVE_QP_SELECTION
,m_bUseAdaptQpSelect
#endif
);
// initialize encoder search class
m_cSearch.init( this, &m_cTrQuant, m_iSearchRange, m_bipredSearchRange, m_motionEstimationSearchMethod, m_maxCUWidth, m_maxCUHeight, m_maxTotalCUDepth, &m_cEntropyCoder, &m_cRdCost, getRDSbacCoder(), getRDGoOnSbacCoder() );
m_iMaxRefPicNum = 0;
}
TEncTop::encode()
TEncTop::encode()は条件を満たす場合にのみm_cGOPEncoder.compressGOP()を実行し、条件を満たさない場合はそのままreturnします。compressGOP()が実行されると、iNumEncoded、m_iNumPicRcvd、m_uiNumAllPicCodedの3つの変数の値が更新されます。
/// Queues one source picture (when pcPicYuvOrg != NULL) and, once a full GOP has
/// been received (or on flush / first picture), runs compressGOP(). On return,
/// iNumEncoded reports how many pictures were encoded in this call.
// JVET_X0048_X0103_FILM_GRAIN defaults to 1, so the first signature below is the one compiled
#if JVET_X0048_X0103_FILM_GRAIN
Void TEncTop::encode(Bool flush, TComPicYuv* pcPicYuvOrg, TComPicYuv* pcPicYuvTrueOrg, TComPicYuv* pcfilteredOrgPicForFG, const InputColourSpaceConversion ipCSC, const InputColourSpaceConversion snrCSC, TComList<TComPicYuv*>& rcListPicYuvRecOut, std::list<AccessUnit>& accessUnitsOut, Int& iNumEncoded)
#else
Void TEncTop::encode( Bool flush, TComPicYuv* pcPicYuvOrg, TComPicYuv* pcPicYuvTrueOrg, const InputColourSpaceConversion ipCSC, const InputColourSpaceConversion snrCSC, TComList<TComPicYuv*>& rcListPicYuvRecOut, std::list<AccessUnit>& accessUnitsOut, Int& iNumEncoded )
#endif
{
// pcPicYuvOrg is NULL when the caller is only flushing queued pictures
if (pcPicYuvOrg != NULL)
{
// get original YUV
TComPic* pcPicCurr = NULL;
Int ppsID=-1; // Use default PPS ID
// Skipped here: m_wcgChromaQpControl.enable=false (WCGPPSEnable=0 in the config)
if (getWCGChromaQPControl().isEnabled())
{
ppsID=getdQPs()[ m_iPOCLast+1 ];
}
// Increments m_iPOCLast and m_iNumPicRcvd by one each
xGetNewPicBuffer( pcPicCurr, ppsID );
pcPicYuvOrg->copyToPic( pcPicCurr->getPicYuvOrg() );
pcPicYuvTrueOrg->copyToPic( pcPicCurr->getPicYuvTrueOrg() );
// Skipped here: m_fgcSEIAnalysisEnabled=false
#if JVET_X0048_X0103_FILM_GRAIN
if (m_fgcSEIAnalysisEnabled && m_fgcSEIExternalDenoised.empty())
{
pcfilteredOrgPicForFG->copyToPic(pcPicCurr->getPicFilteredFG());
}
#endif
// Skipped here: m_ShutterFilterEnable=false
#if SHUTTER_INTERVAL_SEI_PROCESSING
if ( getShutterFilterFlag() )
{
pcPicCurr->xOutputPreFilteredPic(pcPicCurr, &m_cListPic);
pcPicCurr->getPicYuvOrg()->copyToPic(pcPicYuvOrg);
}
#endif
// compute image characteristics
// Skipped here: m_bUseAdaptiveQP=false (AdaptiveQP=0 in the config)
if ( getUseAdaptiveQP() )
{
m_cPreanalyzer.xPreanalyze( dynamic_cast<TEncPic*>( pcPicCurr ) );
}
} // if (pcPicYuvOrg != NULL)
// Early-out conditions under which m_cGOPEncoder.compressGOP() is NOT run:
// no pictures queued, or (not flushing, not the first picture, and a full GOP
// has not yet been received)
if ((m_iNumPicRcvd == 0) || (!flush && (m_iPOCLast != 0) && (m_iNumPicRcvd != m_iGOPSize) && (m_iGOPSize != 0)))
{
iNumEncoded = 0;
return;
}
// Skipped here: m_RCEnableRateControl=false
if ( m_RCEnableRateControl )
{
m_cRateCtrl.initRCGOP( m_iNumPicRcvd );
}
// compress GOP
m_cGOPEncoder.compressGOP(m_iPOCLast, m_iNumPicRcvd, m_cListPic, rcListPicYuvRecOut, accessUnitsOut, false, false, ipCSC, snrCSC, getOutputLogControl());
// Skipped here: m_RCEnableRateControl=false
if ( m_RCEnableRateControl )
{
m_cRateCtrl.destroyRCGOP();
}
// Report the number of pictures just encoded and reset the received-picture count
iNumEncoded = m_iNumPicRcvd;
m_iNumPicRcvd = 0;
m_uiNumAllPicCoded += iNumEncoded;
}
次の条件のいずれかが成り立つとき、compressGOP()は実行されません。
- m_iNumPicRcvd == 0
  - エンコードすべきフレームが存在しない
- !flush && (m_iPOCLast != 0) && (m_iNumPicRcvd != m_iGOPSize) && (m_iGOPSize != 0)
  - 以下の4条件をすべて満たす場合:
    - flushモードではない(!flush)
    - 先頭フレームではない(m_iPOCLast != 0)
    - フレームをGOPサイズ分受信していない(m_iNumPicRcvd != m_iGOPSize)
    - GOPサイズが0ではない(m_iGOPSize != 0)
compressGOP()が実行されるのは、入力映像の終端(flush=1)や先頭フレーム(POC=0)、あるいはGOPサイズ分の入力フレームを読み込んだタイミング(m_iNumPicRcvd=m_iGOPSize)となります。