  • FFmpeg H.264 Decoder Source Code Analysis: The Decoder

    The decoder essentially takes NAL units (NALUs) as input and produces YUV picture data as output.

    AVCodec ff_h264_decoder = {
        .name                  = "h264",
        .long_name             = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
        .type                  = AVMEDIA_TYPE_VIDEO,
        .id                    = AV_CODEC_ID_H264,
        .priv_data_size        = sizeof(H264Context),
        .init                  = h264_decode_init,
        .close                 = h264_decode_end,
        .decode                = h264_decode_frame,
        .capabilities          = /*AV_CODEC_CAP_DRAW_HORIZ_BAND |*/ AV_CODEC_CAP_DR1 |
                                 AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS |
                                 AV_CODEC_CAP_FRAME_THREADS,
        .hw_configs            = (const AVCodecHWConfigInternal*[]) {
    #if CONFIG_H264_DXVA2_HWACCEL
                                   HWACCEL_DXVA2(h264),
    #endif
    #if CONFIG_H264_D3D11VA_HWACCEL
                                   HWACCEL_D3D11VA(h264),
    #endif
    #if CONFIG_H264_D3D11VA2_HWACCEL
                                   HWACCEL_D3D11VA2(h264),
    #endif
    #if CONFIG_H264_NVDEC_HWACCEL
                                   HWACCEL_NVDEC(h264),
    #endif
    #if CONFIG_H264_VAAPI_HWACCEL
                                   HWACCEL_VAAPI(h264),
    #endif
    #if CONFIG_H264_VDPAU_HWACCEL
                                   HWACCEL_VDPAU(h264),
    #endif
    #if CONFIG_H264_VIDEOTOOLBOX_HWACCEL
                                   HWACCEL_VIDEOTOOLBOX(h264),
    #endif
                                   NULL
                               },
        .caps_internal         = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_EXPORTS_CROPPING,
        .flush                 = flush_dpb,
        .init_thread_copy      = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
        .update_thread_context = ONLY_IF_THREADS_ENABLED(ff_h264_update_thread_context),
        .profiles              = NULL_IF_CONFIG_SMALL(ff_h264_profiles),
        .priv_class            = &h264_class,
    };
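
    Applications never call these callbacks directly; they are reached through the generic libavcodec API: avcodec_open2() runs .init, each submitted packet eventually reaches .decode, and closing the context runs .close. A minimal sketch of that flow (error handling trimmed; packet and frame allocation are assumed to happen elsewhere):

    #include <libavcodec/avcodec.h>

    // Minimal sketch: open the H.264 decoder, push one AVPacket of NAL units,
    // and drain any decoded YUV frames.
    static int decode_one_packet(AVCodecContext *ctx, const AVPacket *pkt, AVFrame *frame)
    {
        int ret = avcodec_send_packet(ctx, pkt);            // reaches h264_decode_frame()
        if (ret < 0)
            return ret;
        while ((ret = avcodec_receive_frame(ctx, frame)) >= 0) {
            // frame->data[0..2] now hold the Y, U and V planes
            av_frame_unref(frame);
        }
        return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
    }

    static int open_h264_decoder(AVCodecContext **pctx)
    {
        const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_H264); // ff_h264_decoder
        if (!codec)
            return AVERROR_DECODER_NOT_FOUND;
        *pctx = avcodec_alloc_context3(codec);    // allocates priv_data (the H264Context)
        if (!*pctx)
            return AVERROR(ENOMEM);
        return avcodec_open2(*pctx, codec, NULL); // calls h264_decode_init()
    }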

    The h264_decode_init() function

    static av_cold int h264_decode_init(AVCodecContext *avctx)
    {
        H264Context *h = avctx->priv_data;
        int ret;
    
        // Initialize the basic context parameters
        ret = h264_init_context(avctx, h);
        if (ret < 0)
            return ret;
    
        // Initialize the entropy-decoding (CAVLC VLC) tables
        ret = ff_thread_once(&h264_vlc_init, ff_h264_decode_init_vlc);
        if (ret != 0) {
            av_log(avctx, AV_LOG_ERROR, "pthread_once has failed.");
            return AVERROR_UNKNOWN;
        }
    
        // The codec time base counts field ticks: if the caller supplied a
        // per-frame time base, halve it and always advertise two ticks per frame.
        if (avctx->ticks_per_frame == 1) {
            if(h->avctx->time_base.den < INT_MAX/2) {
                h->avctx->time_base.den *= 2;
            } else
                h->avctx->time_base.num /= 2;
        }
        avctx->ticks_per_frame = 2;
    
        // Parse the extradata (SPS/PPS, either Annex B or an avcC box)
        if (avctx->extradata_size > 0 && avctx->extradata) {
            ret = ff_h264_decode_extradata(avctx->extradata, avctx->extradata_size,
                                           &h->ps, &h->is_avc, &h->nal_length_size,
                                           avctx->err_recognition, avctx);
            if (ret < 0) { 
                h264_decode_end(avctx);
                return ret;
            }
        }
    
        if (h->ps.sps && h->ps.sps->bitstream_restriction_flag &&
            h->avctx->has_b_frames < h->ps.sps->num_reorder_frames) {
            h->avctx->has_b_frames = h->ps.sps->num_reorder_frames;
        }
    
        avctx->internal->allocate_progress = 1;
    
        ff_h264_flush_change(h);
    
        if (h->enable_er < 0 && (avctx->active_thread_type & FF_THREAD_SLICE))
            h->enable_er = 0;
    
        if (h->enable_er && (avctx->active_thread_type & FF_THREAD_SLICE)) {
            av_log(avctx, AV_LOG_WARNING,
                   "Error resilience with slice threads is enabled. It is unsafe and unsupported and may crash. "
                   "Use it at your own risk
    ");
        }
    
        return 0;
    }
    h264_decode_init() mainly initializes basic parameters, sets up the entropy-decoding tables, and parses the extradata.
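
    The extradata can be either Annex B data (start-code-prefixed SPS/PPS) or an ISO avcC box; in the avcC case ff_h264_decode_extradata() sets h->is_avc and reads h->nal_length_size from the avcC header. A simplified sketch of that header layout (per ISO/IEC 14496-15; an illustration, not the FFmpeg parser):

    #include <stdint.h>

    // Simplified illustration of the avcC header that ff_h264_decode_extradata parses.
    // Layout: configurationVersion, profile, profile_compat, level,
    // 6 reserved bits + 2-bit lengthSizeMinusOne, then the SPS/PPS arrays.
    static int parse_avcc_header(const uint8_t *extradata, int size, int *nal_length_size)
    {
        if (size < 7 || extradata[0] != 1)    // configurationVersion must be 1
            return -1;                        // not avcC, treat as Annex B instead
        *nal_length_size = (extradata[4] & 0x03) + 1;   // usually 4
        // extradata[5] & 0x1F = number of SPS NALs that follow, each preceded by a
        // 16-bit big-endian length; the PPS array comes after the SPS array.
        return 0;
    }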

    The h264_decode_frame() function

    static int h264_decode_frame(AVCodecContext *avctx, void *data,
                                 int *got_frame, AVPacket *avpkt)
    {
        const uint8_t *buf = avpkt->data;
        int buf_size       = avpkt->size;
        H264Context *h     = avctx->priv_data;
        AVFrame *pict      = data;
        int buf_index;
        int ret;
    
        h->flags = avctx->flags;
        h->setup_finished = 0;
        h->nb_slice_ctx_queued = 0;
    
        ff_h264_unref_picture(h, &h->last_pic_for_ec);
    
        /* end of stream, output what is still in the buffers */
        if (buf_size == 0)
            return send_next_delayed_frame(h, pict, got_frame, 0);
    
        if (h->is_avc && av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, NULL)) {
            int side_size;
            uint8_t *side = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, &side_size);
            if (is_extra(side, side_size))
                ff_h264_decode_extradata(side, side_size,
                                         &h->ps, &h->is_avc, &h->nal_length_size,
                                         avctx->err_recognition, avctx);
        }
        if (h->is_avc && buf_size >= 9 && buf[0]==1 && buf[2]==0 && (buf[4]&0xFC)==0xFC) {
            if (is_extra(buf, buf_size))
                return ff_h264_decode_extradata(buf, buf_size,
                                                &h->ps, &h->is_avc, &h->nal_length_size,
                                                avctx->err_recognition, avctx);
        }
    
        // Parse the NAL units
        buf_index = decode_nal_units(h, buf, buf_size);
        if (buf_index < 0)
            return AVERROR_INVALIDDATA;
    
        if (!h->cur_pic_ptr && h->nal_unit_type == H264_NAL_END_SEQUENCE) {
            av_assert0(buf_index <= buf_size);
            return send_next_delayed_frame(h, pict, got_frame, buf_index);
        }
    
        if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) && (!h->cur_pic_ptr || !h->has_slice)) {
            if (avctx->skip_frame >= AVDISCARD_NONREF ||
                buf_size >= 4 && !memcmp("Q264", buf, 4))
                return buf_size;
            av_log(avctx, AV_LOG_ERROR, "no frame!
    ");
            return AVERROR_INVALIDDATA;
        }
    
        if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) ||
            (h->mb_y >= h->mb_height && h->mb_height)) {
            if ((ret = ff_h264_field_end(h, &h->slice_ctx[0], 0)) < 0)
                return ret;
    
            /* Wait for second field. */
            // Output the decoded picture into pict
            if (h->next_output_pic) {
                ret = finalize_frame(h, pict, h->next_output_pic, got_frame);
                if (ret < 0)
                    return ret;
            }
        }
    
        av_assert0(pict->buf[0] || !*got_frame);
    
        ff_h264_unref_picture(h, &h->last_pic_for_ec);
    
        return get_consumed_bytes(buf_index, buf_size);
    }

    This function mainly parses the NAL units and writes the decoded result into pict.
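
    h264_decode_frame() hands the raw packet to decode_nal_units(), listed next. There, ff_h2645_packet_split() first cuts the buffer into individual NAL units: for is_avc streams it follows nal_length_size-byte big-endian length prefixes, otherwise it scans for Annex B start codes. A simplified sketch of the length-prefixed case (an illustration, not the FFmpeg implementation):

    #include <stdint.h>

    // Walk a length-prefixed (avcC-style) H.264 buffer and count the NAL units.
    // Each NAL unit is preceded by nal_length_size bytes of big-endian length.
    static int count_avc_nals(const uint8_t *buf, int size, int nal_length_size)
    {
        int pos = 0, count = 0;
        while (pos + nal_length_size <= size) {
            unsigned nal_size = 0;
            for (int i = 0; i < nal_length_size; i++)
                nal_size = (nal_size << 8) | buf[pos + i];
            pos += nal_length_size;
            if (nal_size > (unsigned)(size - pos))
                return -1;                    // truncated NAL unit
            // buf + pos .. buf + pos + nal_size is one NAL unit
            pos += nal_size;
            count++;
        }
        return count;
    }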

    static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size)
    {
        AVCodecContext *const avctx = h->avctx;
        int nals_needed = 0; ///< number of NALs that need decoding before the next frame thread starts
        int idr_cleared=0;
        int i, ret = 0;
    
        h->has_slice = 0;
        h->nal_unit_type= 0;
    
        if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)) {
            h->current_slice = 0;
            if (!h->first_field) {
                h->cur_pic_ptr = NULL;
                ff_h264_sei_uninit(&h->sei);
            }
        }
    
        if (h->nal_length_size == 4) {
            if (buf_size > 8 && AV_RB32(buf) == 1 && AV_RB32(buf+5) > (unsigned)buf_size) {
                h->is_avc = 0;
            }else if(buf_size > 3 && AV_RB32(buf) > 1 && AV_RB32(buf) <= (unsigned)buf_size)
                h->is_avc = 1;
        }
    
        ret = ff_h2645_packet_split(&h->pkt, buf, buf_size, avctx, h->is_avc, h->nal_length_size,
                                    avctx->codec_id, avctx->flags2 & AV_CODEC_FLAG2_FAST, 0);
        if (ret < 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "Error splitting the input into NAL units.
    ");
            return ret;
        }
    
        if (avctx->active_thread_type & FF_THREAD_FRAME)
            nals_needed = get_last_needed_nal(h);
        if (nals_needed < 0)
            return nals_needed;
    
        for (i = 0; i < h->pkt.nb_nals; i++) {
            H2645NAL *nal = &h->pkt.nals[i];
            int max_slice_ctx, err;
    
            if (avctx->skip_frame >= AVDISCARD_NONREF &&
                nal->ref_idc == 0 && nal->type != H264_NAL_SEI)
                continue;
    
            // FIXME these should stop being context-global variables
            h->nal_ref_idc   = nal->ref_idc;
            h->nal_unit_type = nal->type;
    
            err = 0;
            switch (nal->type) {
            case H264_NAL_IDR_SLICE:
                if ((nal->data[1] & 0xFC) == 0x98) {
                    av_log(h->avctx, AV_LOG_ERROR, "Invalid inter IDR frame
    ");
                    h->next_outputed_poc = INT_MIN;
                    ret = -1;
                    goto end;
                }
                if(!idr_cleared) {
                    idr(h); // FIXME ensure we don't lose some frames if there is reordering
                }
                idr_cleared = 1;
            h->has_recovery_point = 1;
            // fall through: an IDR slice is then handled like a regular slice
            case H264_NAL_SLICE:
                h->has_slice = 1;
    
                if ((err = ff_h264_queue_decode_slice(h, nal))) {
                    H264SliceContext *sl = h->slice_ctx + h->nb_slice_ctx_queued;
                    sl->ref_count[0] = sl->ref_count[1] = 0;
                    break;
                }
    
                if (h->current_slice == 1) {
                    if (avctx->active_thread_type & FF_THREAD_FRAME &&
                        i >= nals_needed && !h->setup_finished && h->cur_pic_ptr) {
                        ff_thread_finish_setup(avctx);
                        h->setup_finished = 1;
                    }
    
                    if (h->avctx->hwaccel &&
                        (ret = h->avctx->hwaccel->start_frame(h->avctx, buf, buf_size)) < 0)
                        goto end;
                }
    
                max_slice_ctx = avctx->hwaccel ? 1 : h->nb_slice_ctx;
                if (h->nb_slice_ctx_queued == max_slice_ctx) {
                    if (h->avctx->hwaccel) {
                        ret = avctx->hwaccel->decode_slice(avctx, nal->raw_data, nal->raw_size);
                        h->nb_slice_ctx_queued = 0;
                    } else
                        ret = ff_h264_execute_decode_slices(h);
                    if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
                        goto end;
                }
                break;
            case H264_NAL_DPA:
            case H264_NAL_DPB:
            case H264_NAL_DPC:
                avpriv_request_sample(avctx, "data partitioning");
                break;
            case H264_NAL_SEI:
                ret = ff_h264_sei_decode(&h->sei, &nal->gb, &h->ps, avctx);
                h->has_recovery_point = h->has_recovery_point || h->sei.recovery_point.recovery_frame_cnt != -1;
                if (avctx->debug & FF_DEBUG_GREEN_MD)
                    debug_green_metadata(&h->sei.green_metadata, h->avctx);
                if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
                    goto end;
                break;
            case H264_NAL_SPS: {
                GetBitContext tmp_gb = nal->gb;
                if (avctx->hwaccel && avctx->hwaccel->decode_params) {
                    ret = avctx->hwaccel->decode_params(avctx,
                                                        nal->type,
                                                        nal->raw_data,
                                                        nal->raw_size);
                    if (ret < 0)
                        goto end;
                }
                if (ff_h264_decode_seq_parameter_set(&tmp_gb, avctx, &h->ps, 0) >= 0)
                    break;
                av_log(h->avctx, AV_LOG_DEBUG,
                       "SPS decoding failure, trying again with the complete NAL
    ");
                init_get_bits8(&tmp_gb, nal->raw_data + 1, nal->raw_size - 1);
                if (ff_h264_decode_seq_parameter_set(&tmp_gb, avctx, &h->ps, 0) >= 0)
                    break;
                ff_h264_decode_seq_parameter_set(&nal->gb, avctx, &h->ps, 1);
                break;
            }
            case H264_NAL_PPS:
                if (avctx->hwaccel && avctx->hwaccel->decode_params) {
                    ret = avctx->hwaccel->decode_params(avctx,
                                                        nal->type,
                                                        nal->raw_data,
                                                        nal->raw_size);
                    if (ret < 0)
                        goto end;
                }
                ret = ff_h264_decode_picture_parameter_set(&nal->gb, avctx, &h->ps,
                                                           nal->size_bits);
                if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
                    goto end;
                break;
            case H264_NAL_AUD:
            case H264_NAL_END_SEQUENCE:
            case H264_NAL_END_STREAM:
            case H264_NAL_FILLER_DATA:
            case H264_NAL_SPS_EXT:
            case H264_NAL_AUXILIARY_SLICE:
                break;
            default:
                av_log(avctx, AV_LOG_DEBUG, "Unknown NAL code: %d (%d bits)
    ",
                       nal->type, nal->size_bits);
            }
    
            if (err < 0) {
                av_log(h->avctx, AV_LOG_ERROR, "decode_slice_header error
    ");
            }
        }
    
        ret = ff_h264_execute_decode_slices(h);
        if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
            goto end;
    
        // set decode_error_flags to allow users to detect concealed decoding errors
        if ((ret < 0 || h->slice_ctx->er.error_occurred) && h->cur_pic_ptr) {
            h->cur_pic_ptr->f->decode_error_flags |= FF_DECODE_ERROR_DECODE_SLICES;
        }
    
        ret = 0;
    end:
    
    #if CONFIG_ERROR_RESILIENCE
        /*
         * FIXME: Error handling code does not seem to support interlaced
         * when slices span multiple rows
         * The ff_er_add_slice calls don't work right for bottom
         * fields; they cause massive erroneous error concealing
         * Error marking covers both fields (top and bottom).
         * This causes a mismatched s->error_count
         * and a bad error table. Further, the error count goes to
         * INT_MAX when called for bottom field, because mb_y is
         * past end by one (callers fault) and resync_mb_y != 0
         * causes problems for the first MB line, too.
         */
        if (!FIELD_PICTURE(h) && h->current_slice &&
            h->ps.sps == (const SPS*)h->ps.sps_list[h->ps.pps->sps_id]->data &&
            h->enable_er) {
    
            H264SliceContext *sl = h->slice_ctx;
            int use_last_pic = h->last_pic_for_ec.f->buf[0] && !sl->ref_count[0];
    
            ff_h264_set_erpic(&sl->er.cur_pic, h->cur_pic_ptr);
    
            if (use_last_pic) {
                ff_h264_set_erpic(&sl->er.last_pic, &h->last_pic_for_ec);
                sl->ref_list[0][0].parent = &h->last_pic_for_ec;
                memcpy(sl->ref_list[0][0].data, h->last_pic_for_ec.f->data, sizeof(sl->ref_list[0][0].data));
                memcpy(sl->ref_list[0][0].linesize, h->last_pic_for_ec.f->linesize, sizeof(sl->ref_list[0][0].linesize));
                sl->ref_list[0][0].reference = h->last_pic_for_ec.reference;
            } else if (sl->ref_count[0]) {
                ff_h264_set_erpic(&sl->er.last_pic, sl->ref_list[0][0].parent);
            } else
                ff_h264_set_erpic(&sl->er.last_pic, NULL);
    
            if (sl->ref_count[1])
                ff_h264_set_erpic(&sl->er.next_pic, sl->ref_list[1][0].parent);
    
            sl->er.ref_count = sl->ref_count[0];
    
            ff_er_frame_end(&sl->er);
            if (use_last_pic)
                memset(&sl->ref_list[0][0], 0, sizeof(sl->ref_list[0][0]));
        }
    #endif /* CONFIG_ERROR_RESILIENCE */
        /* clean up */
        if (h->cur_pic_ptr && !h->droppable && h->has_slice) {
            ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
                                      h->picture_structure == PICT_BOTTOM_FIELD);
        }
    
        return (ret < 0) ? ret : buf_size;
    }

    Each NAL unit is dispatched and parsed according to its type.
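
    The switch above dispatches on nal->type, which ff_h2645_packet_split() takes from the one-byte NAL unit header defined by ITU-T H.264. For reference, a sketch of that header layout:

    // H.264 NAL unit header (1 byte):
    //   forbidden_zero_bit (1) | nal_ref_idc (2) | nal_unit_type (5)
    // The type values match the H264_NAL_* cases in the switch above,
    // e.g. 1 = non-IDR slice, 5 = IDR slice, 6 = SEI, 7 = SPS, 8 = PPS, 9 = AUD.
    static void parse_nal_header(uint8_t b, int *ref_idc, int *type)
    {
        *ref_idc = (b >> 5) & 0x3;   // 0 means the NAL unit is not used as a reference
        *type    =  b       & 0x1F;
    }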

    The h264_decode_end() function

    static av_cold int h264_decode_end(AVCodecContext *avctx)
    {
        H264Context *h = avctx->priv_data;
        int i;
    
        ff_h264_remove_all_refs(h);
        ff_h264_free_tables(h);
    
        for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
            ff_h264_unref_picture(h, &h->DPB[i]);
            av_frame_free(&h->DPB[i].f);
        }
        memset(h->delayed_pic, 0, sizeof(h->delayed_pic));
    
        h->cur_pic_ptr = NULL;
    
        av_freep(&h->slice_ctx);
        h->nb_slice_ctx = 0;
    
        ff_h264_sei_uninit(&h->sei);
        ff_h264_ps_uninit(&h->ps);
    
        ff_h2645_packet_uninit(&h->pkt);
    
        ff_h264_unref_picture(h, &h->cur_pic);
        av_frame_free(&h->cur_pic.f);
        ff_h264_unref_picture(h, &h->last_pic_for_ec);
        av_frame_free(&h->last_pic_for_ec.f);
    
        return 0;
    }

    This function mainly releases the memory allocated by the decoder.
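
    On the application side the cleanup is symmetric: freeing the codec context closes the codec, which runs h264_decode_end() shown above. A short sketch (the helper name is illustrative):

    // Tear down the decoder opened earlier; avcodec_free_context() invokes the
    // codec's .close callback (h264_decode_end) before freeing the context.
    static void close_h264_decoder(AVCodecContext **pctx, AVFrame **frame, AVPacket **pkt)
    {
        avcodec_free_context(pctx);
        av_frame_free(frame);
        av_packet_free(pkt);
    }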

  • Original article: https://www.cnblogs.com/vczf/p/14920124.html