/* Notes: This computes a per-macroblock quantization adjustment. During
 * quantization, after the transform's two DCT matrix multiplies, the result
 * is additionally scaled by a matrix E; here that scaling is derived
 * adaptively for each macroblock instead of using the default.
 * Analysis incomplete: macroblock-related data is also computed in
 * expand_border. */
/* Compute per-macroblock adaptive-quantization (AQ) QP offsets for a frame.
 *
 * For every macroblock this fills frame->f_qp_offset[] / f_qp_offset_aq[]
 * (float QP deltas) and, when lowres frames exist (MB-tree / lookahead),
 * frame->i_inv_qscale_factor[] (fixed-point 2^(-qp_adj/6) style factor via
 * x264_exp2fix8).  As a side effect of calling x264_ac_energy_mb(), the
 * per-plane pixel sums and SSDs in frame->i_pixel_sum/i_pixel_ssd are
 * accumulated; at the end the mean is subtracted from the SSD so the stored
 * value is a variance-like quantity (used e.g. for weighted prediction).
 *
 * h             encoder context (reads rc.i_aq_mode, rc.f_aq_strength,
 *               analyse.i_weighted_pred, mb geometry, frames.b_have_lowres)
 * frame         frame whose AQ offsets and pixel stats are written
 * quant_offsets optional caller-supplied per-MB QP offsets (may be NULL);
 *               added on top of (or used instead of) the AQ offsets
 */
void x264_adaptive_quant_frame( x264_t *h, x264_frame_t *frame, float *quant_offsets )
{
    /* constants chosen to result in approximately the same overall bitrate as without AQ.
     * FIXME: while they're written in 5 significant digits, they're only tuned to 2. */
    float strength;
    float avg_adj = 0.f;
    /* Initialize frame stats (accumulated below by x264_ac_energy_mb for
     * each of the 3 planes: luma + 2 chroma). */
    for( int i = 0; i < 3; i++ )
    {
        frame->i_pixel_sum[i] = 0;
        frame->i_pixel_ssd[i] = 0;
    }
    /* Degenerate cases: AQ disabled entirely, or enabled with zero strength. */
    if( h->param.rc.i_aq_mode == X264_AQ_NONE || h->param.rc.f_aq_strength == 0 )
    {
        /* Need to init it anyways for MB tree */
        if( h->param.rc.i_aq_mode && h->param.rc.f_aq_strength == 0 )
        {
            if( quant_offsets )
            {
                /* Pass the caller's offsets straight through as the AQ result. */
                for( int mb_xy = 0; mb_xy < h->mb.i_mb_count; mb_xy++ )
                    frame->f_qp_offset[mb_xy] = frame->f_qp_offset_aq[mb_xy] = quant_offsets[mb_xy];
                if( h->frames.b_have_lowres )
                    for( int mb_xy = 0; mb_xy < h->mb.i_mb_count; mb_xy++ )
                        frame->i_inv_qscale_factor[mb_xy] = x264_exp2fix8( frame->f_qp_offset[mb_xy] );
            }
            else
            {
                /* No external offsets: zero QP deltas; 256 == x264_exp2fix8(0),
                 * i.e. a unity scale factor in 8.8 fixed point. */
                memset( frame->f_qp_offset, 0, h->mb.i_mb_count * sizeof(float) );
                memset( frame->f_qp_offset_aq, 0, h->mb.i_mb_count * sizeof(float) );
                if( h->frames.b_have_lowres )
                    for( int mb_xy = 0; mb_xy < h->mb.i_mb_count; mb_xy++ )
                        frame->i_inv_qscale_factor[mb_xy] = 256;
            }
        }
        /* Need variance data for weighted prediction */
        if( h->param.analyse.i_weighted_pred )
        {
            /* Walk every MB purely for the pixel sum/SSD side effects. */
            for( int mb_y = 0; mb_y < h->mb.i_mb_height; mb_y++ )
                for( int mb_x = 0; mb_x < h->mb.i_mb_width; mb_x++ )
                    x264_ac_energy_mb( h, mb_x, mb_y, frame );
        }
        else
            return; /* stats untouched (still zero); skip mean removal below */
    }
    /* Actual adaptive quantization */
    else
    {
        if( h->param.rc.i_aq_mode == X264_AQ_AUTOVARIANCE )
        {
            /* First pass: gather per-MB energy and derive frame-wide statistics
             * so strength/bias auto-adapt to this frame's content.
             * sqrt(1 << (BIT_DEPTH-8)) compensates energy's growth at higher
             * bit depths so the tuned 8-bit constants still apply. */
            float bit_depth_correction = powf(1 << (BIT_DEPTH-8), 0.5f);
            float avg_adj_pow2 = 0.f;
            for( int mb_y = 0; mb_y < h->mb.i_mb_height; mb_y++ )
                for( int mb_x = 0; mb_x < h->mb.i_mb_width; mb_x++ )
                {
                    uint32_t energy = x264_ac_energy_mb( h, mb_x, mb_y, frame );
                    /* energy^(1/8): heavily compressed dynamic range; +1 keeps
                     * flat (zero-energy) blocks well-defined. */
                    float qp_adj = powf( energy + 1, 0.125f );
                    /* Stash raw value; converted to a real QP delta in pass 2. */
                    frame->f_qp_offset[mb_x + mb_y*h->mb.i_mb_stride] = qp_adj;
                    avg_adj += qp_adj;
                    avg_adj_pow2 += qp_adj * qp_adj;
                }
            avg_adj /= h->mb.i_mb_count;
            avg_adj_pow2 /= h->mb.i_mb_count;
            strength = h->param.rc.f_aq_strength * avg_adj / bit_depth_correction;
            /* Shift the reference point using the second moment; 14.f is an
             * empirically tuned constant (see FIXME above). */
            avg_adj = avg_adj - 0.5f * (avg_adj_pow2 - (14.f * bit_depth_correction)) / avg_adj;
        }
        else
            /* Fixed-strength variance AQ; 1.0397f is a tuned normalization. */
            strength = h->param.rc.f_aq_strength * 1.0397f;
        /* Second pass: convert energies into signed QP offsets per MB. */
        for( int mb_y = 0; mb_y < h->mb.i_mb_height; mb_y++ )
            for( int mb_x = 0; mb_x < h->mb.i_mb_width; mb_x++ )
            {
                float qp_adj;
                int mb_xy = mb_x + mb_y*h->mb.i_mb_stride;
                if( h->param.rc.i_aq_mode == X264_AQ_AUTOVARIANCE )
                {
                    /* Center the pass-1 value on this frame's adjusted mean. */
                    qp_adj = frame->f_qp_offset[mb_xy];
                    qp_adj = strength * (qp_adj - avg_adj);
                }
                else
                {
                    /* log2(energy) relative to a tuned pivot (14.427 at 8-bit;
                     * +2 per extra bit of depth since energy scales ~4x/bit).
                     * X264_MAX guards log2 of zero energy. */
                    uint32_t energy = x264_ac_energy_mb( h, mb_x, mb_y, frame );
                    qp_adj = strength * (x264_log2( X264_MAX(energy, 1) ) - (14.427f + 2*(BIT_DEPTH-8)));
                }
                if( quant_offsets )
                    qp_adj += quant_offsets[mb_xy];
                frame->f_qp_offset[mb_xy] =
                frame->f_qp_offset_aq[mb_xy] = qp_adj;
                if( h->frames.b_have_lowres )
                    frame->i_inv_qscale_factor[mb_xy] = x264_exp2fix8(qp_adj);
            }
    }
    /* Remove mean from SSD calculation: SSD - sum^2/N (with +N/2 for rounding
     * to nearest), turning the raw SSD into N*variance per plane.  Chroma
     * planes are scaled by the chroma subsampling shifts. */
    for( int i = 0; i < 3; i++ )
    {
        uint64_t ssd = frame->i_pixel_ssd[i];
        uint64_t sum = frame->i_pixel_sum[i];
        int width  = 16*h->mb.i_mb_width >> (i && CHROMA_H_SHIFT);
        int height = 16*h->mb.i_mb_height >> (i && CHROMA_V_SHIFT);
        frame->i_pixel_ssd[i] = ssd - (sum * sum + width * height / 2) / (width * height);
    }
}