openh264
- OpenH264 is an open-source H.264/AVC video codec initiated by Cisco, which contributed the original code base. It provides a library for video encoding and decoding that implements the H.264 compression standard and is widely used in video conferencing, streaming, and video storage.
- OpenH264 is a solid choice for H.264 encode/decode functionality, especially in scenarios that call for a free solution.
- Some key features of OpenH264:
Open source: OpenH264 is released under a BSD-style license, allowing anyone to freely use, modify, and distribute it.
Cross-platform: supports multiple operating systems, including Windows, Linux, and Mac OS X.
High performance: provides efficient encoding and decoding algorithms capable of handling high-resolution video.
Optimized implementation: performance-critical routines are accelerated with platform-specific SIMD assembly (e.g. SSE/AVX on x86, NEON on ARM) and multi-threading.
Flexible encoder configuration: encoding parameters such as resolution, frame rate, and bitrate can be configured as needed.
Decoding capability: in addition to encoding, OpenH264 also provides a decoder that can decode H.264 bitstreams.
API: a set of API interfaces makes it easy for developers to integrate the library into their own applications (see the usage sketch after this list).
Community support: as an open-source project, OpenH264 has an active community, and new features and improvements are continually added.
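As a hedged illustration of the API and the configurable parameters mentioned above, the sketch below creates an encoder, sets resolution, frame rate, and bitrate through SEncParamBase, and encodes a single I420 frame. The numeric values and the I420 buffer are placeholders, error handling is minimal, and the header path may differ between installations.

#include <cstring>
#include "wels/codec_api.h"   // public OpenH264 encoder API; path may vary by installation

// Minimal sketch: encode one I420 frame. Width/height/bitrate are arbitrary
// example values, not recommendations from OpenH264 itself.
int EncodeOneFrame (unsigned char* pI420, int iWidth, int iHeight) {
  ISVCEncoder* pEncoder = NULL;
  if (WelsCreateSVCEncoder (&pEncoder) != 0 || pEncoder == NULL)
    return -1;

  SEncParamBase sParam;
  memset (&sParam, 0, sizeof (sParam));
  sParam.iUsageType     = CAMERA_VIDEO_REAL_TIME;
  sParam.iPicWidth      = iWidth;
  sParam.iPicHeight     = iHeight;
  sParam.fMaxFrameRate  = 30.0f;
  sParam.iTargetBitrate = 1000000;        // about 1 Mbps
  sParam.iRCMode        = RC_BITRATE_MODE;
  pEncoder->Initialize (&sParam);

  SSourcePicture sPic;
  memset (&sPic, 0, sizeof (sPic));
  sPic.iColorFormat = videoFormatI420;
  sPic.iPicWidth    = iWidth;
  sPic.iPicHeight   = iHeight;
  sPic.iStride[0]   = iWidth;
  sPic.iStride[1]   = sPic.iStride[2] = iWidth / 2;
  sPic.pData[0]     = pI420;                               // Y plane
  sPic.pData[1]     = pI420 + iWidth * iHeight;            // U plane
  sPic.pData[2]     = pI420 + iWidth * iHeight * 5 / 4;    // V plane

  SFrameBSInfo sInfo;
  memset (&sInfo, 0, sizeof (sInfo));
  int iRet = pEncoder->EncodeFrame (&sPic, &sInfo);        // NAL units are returned in sInfo

  pEncoder->Uninitialize();
  WelsDestroySVCEncoder (pEncoder);
  return iRet;
}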
Location of the adaptive quantization source file
openh264/codec/processing/adaptivequantization/AdaptiveQuantization.cpp
Adaptive quantization source code flow
- Notes:
  - As can be seen, openh264 currently keeps the adaptive quantization algorithm disabled; re-enabling it would require some refactoring (see the sketch after these notes).
  - The core of the adaptive quantization implementation is the Process function.
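For context, the public parameter set exposes a bEnableAdaptiveQuant flag in SEncParamExt; the hedged sketch below shows how an application would request adaptive quantization through InitializeExt. Given the note above, whether this flag actually reaches the CAdaptiveQuantization path depends on the openh264 version, so treat it as an assumption to verify against the source.

#include "wels/codec_api.h"

// Hedged sketch: request adaptive quantization via the public parameter set.
// Whether the AQ code path is actually exercised depends on the openh264
// version, as noted above.
void ConfigureEncoderWithAq (ISVCEncoder* pEncoder, int iWidth, int iHeight) {
  SEncParamExt sParam;
  pEncoder->GetDefaultParams (&sParam);   // fill sane defaults first
  sParam.iPicWidth            = iWidth;
  sParam.iPicHeight           = iHeight;
  sParam.iTargetBitrate       = 1000000;  // example value
  sParam.bEnableAdaptiveQuant = true;     // ask for adaptive quantization
  pEncoder->InitializeExt (&sParam);
}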
How adaptive quantization works
- Purpose: implements adaptive quantization for video encoding, which involves motion and texture analysis of the picture plus dynamic adjustment of the quantization parameter (QP).
- Steps:
  - Initialize some variables, including the picture width and height, the macroblock (MB) width and height, and the total number of MBs.
  - Declare pointers for the motion/texture units (SMotionTextureUnit) and for the SVAACalcResult (Variable Adaptive Analysis) results; the fields of SVAACalcResult are filled by the VAACalcSadSsdBgd_c / VAACalcSadBgd_c functions.
  - Initialize the variables used to compute the quantization step and quantization parameters.
  - Get pointers to the Y planes of the source and reference pictures, together with their strides.
  - Enter the motion analysis part, which computes the motion-residual variance and the texture variance per macroblock:
    - If the pointers recorded in SVAACalcResult match the pictures passed in,
      - reuse the SVAACalcResult statistics;
      - for each MB, derive a motion index and a texture index from them and accumulate these into iAverageMotionIndex and iAverageTextureIndex.
    - Otherwise,
      - call m_pfVar to perform the motion and texture analysis per MB;
      - again accumulate each MB's motion and texture index into iAverageMotionIndex and iAverageTextureIndex.
  - Compute the average motion index and average texture index, then apply some conditional checks and scaling (guarding against near-zero averages and applying mode-dependent weights).
  - Process every macroblock in a double for loop:
    - map the MB's motion and texture indices to a QP offset, computing iMotionTextureIndexToDeltaQp (see the floating-point sketch after this list);
    - depending on the adaptive quantization mode (AQ_QUALITY_MODE or AQ_BITRATE_MODE), adjust iMotionTextureIndexToDeltaQp.
  - Store the per-MB QP offsets in m_sAdaptiveQuantParam.pMotionTextureIndexToDeltaQp and their average in m_sAdaptiveQuantParam.iAverMotionTextureIndexToDeltaQp.
  - Set the return value to success (RET_SUCCESS).
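To make the fixed-point arithmetic below easier to read, here is a simplified floating-point sketch of the per-MB mapping that Process implements. kModelAlpha and kModelTime are placeholders for the codec's MODEL_ALPHA / MODEL_TIME constants (the real code works with their scaled 64-bit integer forms and a mode-weighted average), so treat this as an aid to intuition rather than the exact computation.

// Simplified floating-point view of the per-MB delta-QP mapping in Process.
// r is the MB's texture (or motion) index divided by the mode-weighted frame
// average, so r == 1.0 means "as complex as the average MB" and yields 0.
// kModelAlpha and kModelTime stand in for MODEL_ALPHA / MODEL_TIME.
static double IndexToDeltaQp (double dIndex, double dWeightedAverageIndex,
                              double kModelAlpha, double kModelTime) {
  double r = dIndex / dWeightedAverageIndex;
  return kModelTime * (r - 1.0) / (r + kModelAlpha);  // roughly bounded to +/-6 per the source comments
}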
- Related source code:
Process function
EResult CAdaptiveQuantization::Process (int32_t iType, SPixMap* pSrcPixMap, SPixMap* pRefPixMap) {
  EResult eReturn = RET_INVALIDPARAM;

  int32_t iWidth      = pSrcPixMap->sRect.iRectWidth;
  int32_t iHeight     = pSrcPixMap->sRect.iRectHeight;
  int32_t iMbWidth    = iWidth >> 4;
  int32_t iMbHeight   = iHeight >> 4;
  int32_t iMbTotalNum = iMbWidth * iMbHeight;

  SMotionTextureUnit* pMotionTexture = NULL;
  SVAACalcResult* pVaaCalcResults = NULL;
  int32_t iMotionTextureIndexToDeltaQp = 0;
  int32_t iAverMotionTextureIndexToDeltaQp = 0; // double to uint32
  int64_t iAverageMotionIndex = 0;  // double to float
  int64_t iAverageTextureIndex = 0;

  int64_t iQStep = 0;
  int64_t iLumaMotionDeltaQp = 0;
  int64_t iLumaTextureDeltaQp = 0;

  uint8_t* pRefFrameY = NULL, *pCurFrameY = NULL;
  int32_t iRefStride = 0, iCurStride = 0;

  uint8_t* pRefFrameTmp = NULL, *pCurFrameTmp = NULL;
  int32_t i = 0, j = 0;

  pRefFrameY = (uint8_t*)pRefPixMap->pPixel[0];
  pCurFrameY = (uint8_t*)pSrcPixMap->pPixel[0];

  iRefStride = pRefPixMap->iStride[0];
  iCurStride = pSrcPixMap->iStride[0];

  ///////////////// motion /////////////////
  //  motion MB residual variance
  iAverageMotionIndex = 0;
  iAverageTextureIndex = 0;
  pMotionTexture = m_sAdaptiveQuantParam.pMotionTextureUnit;
  pVaaCalcResults = m_sAdaptiveQuantParam.pCalcResult;

  if (pVaaCalcResults->pRefY == pRefFrameY && pVaaCalcResults->pCurY == pCurFrameY) {
    int32_t iMbIndex = 0;
    int32_t iSumDiff, iSQDiff, uiSum, iSQSum;
    for (j = 0; j < iMbHeight; j ++) {
      pRefFrameTmp = pRefFrameY;
      pCurFrameTmp = pCurFrameY;
      for (i = 0; i < iMbWidth; i++) {
        iSumDiff  = pVaaCalcResults->pSad8x8[iMbIndex][0];
        iSumDiff += pVaaCalcResults->pSad8x8[iMbIndex][1];
        iSumDiff += pVaaCalcResults->pSad8x8[iMbIndex][2];
        iSumDiff += pVaaCalcResults->pSad8x8[iMbIndex][3];

        iSQDiff = pVaaCalcResults->pSsd16x16[iMbIndex];
        uiSum   = pVaaCalcResults->pSum16x16[iMbIndex];
        iSQSum  = pVaaCalcResults->pSumOfSquare16x16[iMbIndex];

        iSumDiff = iSumDiff >> 8;
        pMotionTexture->uiMotionIndex = (iSQDiff >> 8) - (iSumDiff * iSumDiff);

        uiSum = uiSum >> 8;
        pMotionTexture->uiTextureIndex = (iSQSum >> 8) - (uiSum * uiSum);

        iAverageMotionIndex  += pMotionTexture->uiMotionIndex;
        iAverageTextureIndex += pMotionTexture->uiTextureIndex;
        pMotionTexture++;
        ++iMbIndex;
        pRefFrameTmp += MB_WIDTH_LUMA;
        pCurFrameTmp += MB_WIDTH_LUMA;
      }
      pRefFrameY += (iRefStride) << 4;
      pCurFrameY += (iCurStride) << 4;
    }
  } else {
    for (j = 0; j < iMbHeight; j ++) {
      pRefFrameTmp = pRefFrameY;
      pCurFrameTmp = pCurFrameY;
      for (i = 0; i < iMbWidth; i++) {
        m_pfVar (pRefFrameTmp, iRefStride, pCurFrameTmp, iCurStride, pMotionTexture);
        iAverageMotionIndex  += pMotionTexture->uiMotionIndex;
        iAverageTextureIndex += pMotionTexture->uiTextureIndex;
        pMotionTexture++;
        pRefFrameTmp += MB_WIDTH_LUMA;
        pCurFrameTmp += MB_WIDTH_LUMA;
      }
      pRefFrameY += (iRefStride) << 4;
      pCurFrameY += (iCurStride) << 4;
    }
  }
  iAverageMotionIndex  = WELS_DIV_ROUND64 (iAverageMotionIndex * AQ_INT_MULTIPLY, iMbTotalNum);
  iAverageTextureIndex = WELS_DIV_ROUND64 (iAverageTextureIndex * AQ_INT_MULTIPLY, iMbTotalNum);
  if ((iAverageMotionIndex <= AQ_PESN) && (iAverageMotionIndex >= -AQ_PESN)) {
    iAverageMotionIndex = AQ_INT_MULTIPLY;
  }
  if ((iAverageTextureIndex <= AQ_PESN) && (iAverageTextureIndex >= -AQ_PESN)) {
    iAverageTextureIndex = AQ_INT_MULTIPLY;
  }
  //  motion mb residual map to QP
  //  texture mb original map to QP
  iAverMotionTextureIndexToDeltaQp = 0;
  iAverageMotionIndex = WELS_DIV_ROUND64 (AVERAGE_TIME_MOTION * iAverageMotionIndex, AQ_TIME_INT_MULTIPLY);

  if (m_sAdaptiveQuantParam.iAdaptiveQuantMode == AQ_QUALITY_MODE) {
    iAverageTextureIndex = WELS_DIV_ROUND64 (AVERAGE_TIME_TEXTURE_QUALITYMODE * iAverageTextureIndex, AQ_TIME_INT_MULTIPLY);
  } else {
    iAverageTextureIndex = WELS_DIV_ROUND64 (AVERAGE_TIME_TEXTURE_BITRATEMODE * iAverageTextureIndex, AQ_TIME_INT_MULTIPLY);
  }

  int64_t iAQ_EPSN = - ((int64_t)AQ_PESN * AQ_TIME_INT_MULTIPLY * AQ_QSTEP_INT_MULTIPLY / AQ_INT_MULTIPLY);
  pMotionTexture = m_sAdaptiveQuantParam.pMotionTextureUnit;

  for (j = 0; j < iMbHeight; j ++) {
    for (i = 0; i < iMbWidth; i++) {
      int64_t a = WELS_DIV_ROUND64 ((int64_t) (pMotionTexture->uiTextureIndex) * AQ_INT_MULTIPLY * AQ_TIME_INT_MULTIPLY,
                                    iAverageTextureIndex);
      iQStep = WELS_DIV_ROUND64 ((a - AQ_TIME_INT_MULTIPLY) * AQ_QSTEP_INT_MULTIPLY, (a + MODEL_ALPHA));
      iLumaTextureDeltaQp = MODEL_TIME * iQStep; // range +- 6

      iMotionTextureIndexToDeltaQp = ((int32_t) (iLumaTextureDeltaQp / (AQ_TIME_INT_MULTIPLY)));

      a = WELS_DIV_ROUND64 (((int64_t)pMotionTexture->uiMotionIndex) * AQ_INT_MULTIPLY * AQ_TIME_INT_MULTIPLY,
                            iAverageMotionIndex);
      iQStep = WELS_DIV_ROUND64 ((a - AQ_TIME_INT_MULTIPLY) * AQ_QSTEP_INT_MULTIPLY, (a + MODEL_ALPHA));
      iLumaMotionDeltaQp = MODEL_TIME * iQStep; // range +- 6

      if ((m_sAdaptiveQuantParam.iAdaptiveQuantMode == AQ_QUALITY_MODE && iLumaMotionDeltaQp < iAQ_EPSN)
          || (m_sAdaptiveQuantParam.iAdaptiveQuantMode == AQ_BITRATE_MODE)) {
        iMotionTextureIndexToDeltaQp += ((int32_t) (iLumaMotionDeltaQp / (AQ_TIME_INT_MULTIPLY)));
      }

      m_sAdaptiveQuantParam.pMotionTextureIndexToDeltaQp[j * iMbWidth + i] = (int8_t) (iMotionTextureIndexToDeltaQp /
          AQ_QSTEP_INT_MULTIPLY);
      iAverMotionTextureIndexToDeltaQp += iMotionTextureIndexToDeltaQp;
      pMotionTexture++;
    }
  }

  m_sAdaptiveQuantParam.iAverMotionTextureIndexToDeltaQp = iAverMotionTextureIndexToDeltaQp / iMbTotalNum;
  eReturn = RET_SUCCESS;

  return eReturn;
}
m_pfVar function (pointing to SampleVariance16x16_c)
void SampleVariance16x16_c (uint8_t* pRefY, int32_t iRefStride, uint8_t* pSrcY, int32_t iSrcStride,
                            SMotionTextureUnit* pMotionTexture) {
  uint32_t uiCurSquare = 0, uiSquare = 0;
  uint16_t uiCurSum = 0, uiSum = 0;

  for (int32_t y = 0; y < MB_WIDTH_LUMA; y++) {
    for (int32_t x = 0; x < MB_WIDTH_LUMA; x++) {
      uint32_t uiDiff = WELS_ABS (pRefY[x] - pSrcY[x]);
      uiSum    += uiDiff;
      uiSquare += uiDiff * uiDiff;

      uiCurSum    += pSrcY[x];
      uiCurSquare += pSrcY[x] * pSrcY[x];
    }
    pRefY += iRefStride;
    pSrcY += iSrcStride;
  }
  uiSum = uiSum >> 8;
  pMotionTexture->uiMotionIndex = (uiSquare >> 8) - (uiSum * uiSum);

  uiCurSum = uiCurSum >> 8;
  pMotionTexture->uiTextureIndex = (uiCurSquare >> 8) - (uiCurSum * uiCurSum);
}
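Both indices are variance estimates over the 256 luma samples of a 16x16 macroblock: uiMotionIndex is mean(d²) − mean(d)² for the absolute inter-frame difference d, and uiTextureIndex is mean(p²) − mean(p)² for the current-frame pixels p; the >> 8 shifts implement the division by 256. The SAD/SSD branch of Process computes the same two quantities from the statistics the VAA module has already accumulated (pSad8x8, pSsd16x16, pSum16x16, pSumOfSquare16x16), which is why the two paths are interchangeable.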