  • FFmpeg in Practice Series: 002

    Talk is cheap. Show me the code!

    Example 1: decode_video.c

    int main(int argc, char **argv)
    {
        const char *filename, *outfilename;
        const AVCodec *codec;
        AVCodecContext *c = NULL;
        int frame_count;
        FILE *f;
        AVFrame *frame;
        uint8_t inbuf[INBUF_SIZE + AV_INPUT_BUFFER_PADDING_SIZE];
        AVPacket avpkt;

        filename    = argv[1];
        outfilename = argv[2];

        avcodec_register_all();
        av_init_packet(&avpkt);

        codec = avcodec_find_decoder(AV_CODEC_ID_MPEG1VIDEO);
        if (!codec)
            exit(1);
        c = avcodec_alloc_context3(codec);
        if (!c || avcodec_open2(c, codec, NULL) < 0)
            exit(1);

        f = fopen(filename, "rb");          /* the original excerpt never opened the input */
        if (!f)
            exit(1);

        frame = av_frame_alloc();
        frame_count = 0;
        for (;;) {
            avpkt.size = fread(inbuf, 1, INBUF_SIZE, f);
            if (avpkt.size == 0)
                break;                      /* EOF: leave the read loop */
            avpkt.data = inbuf;
            while (avpkt.size > 0)
                if (decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 0) < 0)
                    exit(1);
        }

        /* feed an empty packet to flush the delayed frames out of the decoder */
        avpkt.data = NULL;
        avpkt.size = 0;
        decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 1);

        fclose(f);
        avcodec_free_context(&c);
        av_frame_free(&frame);
        return 0;
    }

    static int decode_write_frame(const char *outfilename, AVCodecContext *avctx,
                                  AVFrame *frame, int *frame_count, AVPacket *pkt, int last)
    {
        int len, got_frame;

        len = avcodec_decode_video2(avctx, frame, &got_frame, pkt);
        if (len < 0)
            return len;
        if (got_frame)              /* writing the frame to outfilename is elided here */
            (*frame_count)++;
        if (pkt->data) {            /* step past the bytes the decoder consumed */
            pkt->size -= len;
            pkt->data += len;
        }
        return 0;
    }
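    The loop above shovels raw file bytes straight into the decoder, which happens to work because an MPEG-1 elementary stream is self-synchronizing. For most codecs you would first run the bytes through a parser so the decoder only ever sees complete packets. Below is a minimal sketch of that variant, reusing the variables from main() above and assuming the FFmpeg 3.x-era parser API (error handling mostly elided):

    AVCodecParserContext *parser = av_parser_init(AV_CODEC_ID_MPEG1VIDEO);
    uint8_t *data;
    int data_size;

    while ((data_size = fread(inbuf, 1, INBUF_SIZE, f)) > 0) {
        data = inbuf;
        while (data_size > 0) {
            /* the parser buffers input internally and only emits
             * avpkt.data/avpkt.size once a whole packet is assembled */
            int n = av_parser_parse2(parser, c, &avpkt.data, &avpkt.size,
                                     data, data_size,
                                     AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
            if (n < 0)
                exit(1);
            data      += n;
            data_size -= n;
            while (avpkt.size > 0)
                if (decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 0) < 0)
                    exit(1);
        }
    }
    av_parser_close(parser);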

    Now let's analyze the data structures this example involves:

    1. AVPacket avpkt;

    typedef struct AVPacket {
        AVBufferRef *buf;            ///< ref-counted buffer backing data, or NULL
        int64_t pts;                 ///< presentation timestamp, in stream time_base units
        int64_t dts;                 ///< decompression timestamp
        uint8_t *data;               ///< the compressed payload
        int   size;
        int   stream_index;
        int   flags;                 ///< AV_PKT_FLAG_KEY etc.
        AVPacketSideData *side_data;
        int side_data_elems;
        int64_t duration;
        int64_t pos;                 ///< byte position in stream, -1 if unknown
    } AVPacket;
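    A note on the fields: data/size point at the compressed payload, while buf is the reference-counted buffer that owns it; buf is NULL when the packet wraps caller-owned memory, exactly as with inbuf in the example above. In newer FFmpeg releases, av_init_packet() on a stack AVPacket is deprecated in favour of heap-allocated packets. A minimal sketch of that lifecycle:

    /* sketch: reference-counted AVPacket lifecycle (newer FFmpeg) */
    AVPacket *pkt = av_packet_alloc();      /* replaces stack pkt + av_init_packet() */
    if (!pkt)
        return AVERROR(ENOMEM);
    /* ... a demuxer or parser fills pkt->data / pkt->size ... */
    av_packet_unref(pkt);                   /* drop the payload; struct stays reusable */
    av_packet_free(&pkt);                   /* free the struct itself, sets pkt to NULL */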

    2. AVFrame *frame;

    typedef struct AVFrame {
        uint8_t *data[AV_NUM_DATA_POINTERS];    ///< one pointer per plane (video) / channel (audio)
        int linesize[AV_NUM_DATA_POINTERS];     ///< stride of each plane, in bytes
        uint8_t **extended_data;
        int width, height;
        int nb_samples;                         ///< audio: samples per channel
        int format;                             ///< AVPixelFormat or AVSampleFormat
        int key_frame;
        enum AVPictureType pict_type;
        AVRational sample_aspect_ratio;
        int64_t pts;
        int64_t pkt_dts;
        int coded_picture_number;
        int display_picture_number;
        int quality;
        void *opaque;
        int repeat_pict;
        int interlaced_frame;
        int top_field_first;
        int palette_has_changed;
        int64_t reordered_opaque;
        int sample_rate;
        uint64_t channel_layout;
        AVBufferRef *buf[AV_NUM_DATA_POINTERS]; ///< ref-counted buffers backing data[]
        AVBufferRef **extended_buf;
        int        nb_extended_buf;
        AVFrameSideData **side_data;
        int            nb_side_data;
        int flags;
        enum AVColorRange color_range;
        enum AVColorPrimaries color_primaries;
        enum AVColorTransferCharacteristic color_trc;
        enum AVColorSpace colorspace;
        enum AVChromaLocation chroma_location;
        int64_t best_effort_timestamp;
        int64_t pkt_pos;
        int64_t pkt_duration;
        AVDictionary *metadata;
        int decode_error_flags;
        int channels;
        int pkt_size;
        AVBufferRef *hw_frames_ctx;             ///< set for hardware frames
        AVBufferRef *opaque_ref;
    } AVFrame;
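    For video frames, data[i] and linesize[i] describe one plane each, and linesize (the stride in bytes) can be larger than the visible width because of alignment padding, so plane memory must be walked row by row. As an illustration, here is a minimal sketch of what the example's elided pgm_save() helper boils down to for a yuv420p frame (save_gray_pgm is a hypothetical name, not an FFmpeg API):

    /* sketch: dump the luma plane of a decoded yuv420p frame as a binary PGM */
    static void save_gray_pgm(const AVFrame *frame, const char *path)
    {
        FILE *out = fopen(path, "wb");
        if (!out)
            return;
        fprintf(out, "P5\n%d %d\n255\n", frame->width, frame->height);
        for (int y = 0; y < frame->height; y++)   /* linesize may exceed width */
            fwrite(frame->data[0] + y * frame->linesize[0], 1, frame->width, out);
        fclose(out);
    }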

    3. const AVCodec *codec;

    codec = avcodec_find_decoder(AV_CODEC_ID_MPEG1VIDEO);

    AVCodec *avcodec_find_decoder(enum AVCodecID id)
    {
        return find_encdec(id, 0);
    }

    static AVCodec *find_encdec(enum AVCodecID id, int encoder)
    {
        AVCodec *p, *experimental = NULL;
        p = first_avcodec;
        id = remap_deprecated_codec_id(id);
        while (p) {
            if ((encoder ? av_codec_is_encoder(p) : av_codec_is_decoder(p)) &&
                p->id == id) {
                if (p->capabilities & AV_CODEC_CAP_EXPERIMENTAL && !experimental) {
                    experimental = p;
                } else
                    return p;
            }
            p = p->next;
        }
        return experimental;
    }
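    find_encdec() walks the linked list that avcodec_register_all() builds through p->next. Worth knowing: since FFmpeg 4.0 that list is gone and the codec registry is a constant array you enumerate with av_codec_iterate() instead (avcodec_register_all() became a no-op there). A sketch of the equivalent lookup on the newer API (find_decoder_by_id is a hypothetical helper name):

    /* sketch: decoder lookup on FFmpeg >= 4.0, where first_avcodec/p->next
     * were replaced by the av_codec_iterate() enumerator */
    const AVCodec *find_decoder_by_id(enum AVCodecID id)
    {
        void *iter = NULL;
        const AVCodec *p;
        while ((p = av_codec_iterate(&iter)))
            if (av_codec_is_decoder(p) && p->id == id)
                return p;
        return NULL;
    }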

    AVCodec ff_mpeg1video_decoder = {
        .name                  = "mpeg1video",
        .long_name             = NULL_IF_CONFIG_SMALL("MPEG-1 video"),
        .type                  = AVMEDIA_TYPE_VIDEO,
        .id                    = AV_CODEC_ID_MPEG1VIDEO,
        .priv_data_size        = sizeof(Mpeg1Context),
        .init                  = mpeg_decode_init,
        .close                 = mpeg_decode_end,
        .decode                = mpeg_decode_frame,
        .capabilities          = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
                                 AV_CODEC_CAP_TRUNCATED | AV_CODEC_CAP_DELAY |
                                 AV_CODEC_CAP_SLICE_THREADS,
        .caps_internal         = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
        .flush                 = flush,
        .max_lowres            = 3,
        .update_thread_context = ONLY_IF_THREADS_ENABLED(mpeg_decode_update_thread_context)
    };

    While we are at it, here is that private context:

    typedef struct Mpeg1Context {
        MpegEncContext mpeg_enc_ctx;
        int mpeg_enc_ctx_allocated; /* true if decoding context allocated */
        int repeat_field;           /* true if we must repeat the field */
        AVPanScan pan_scan;         /* some temporary storage for the panscan */
        AVStereo3D stereo3d;
        int has_stereo3d;
        uint8_t *a53_caption;
        int a53_caption_size;
        uint8_t afd;
        int has_afd;
        int slice_count;
        AVRational save_aspect;
        int save_width, save_height, save_progressive_seq;
        AVRational frame_rate_ext;  /* MPEG-2 specific framerate modificator */
        int sync;                   /* Did we reach a sync point like a GOP/SEQ/KEYFrame? */
        int tmpgexs;
        int first_slice;
        int extradata_decoded;
    } Mpeg1Context;

    So if you want AVCodec ff_mpeg1video_decoder to do the work for you, you first need a Mpeg1Context, because all of ff_mpeg1video_decoder's working state comes from the Mpeg1Context, and the Mpeg1Context is set up at decode-init time, as sketched below.
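    The glue between the two is avctx->priv_data: avcodec_alloc_context3() (next section) allocates priv_data_size bytes there, and every codec callback casts it back. A much-simplified sketch of the pattern; the real mpeg_decode_init() in mpeg12dec.c does far more:

    static av_cold int mpeg_decode_init(AVCodecContext *avctx)
    {
        Mpeg1Context *s = avctx->priv_data;   /* allocated by avcodec_alloc_context3() */
        /* ... set up defaults in s; all later decode calls read state from here ... */
        return 0;
    }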

    4. AVCodecContext *c = NULL;

    c = avcodec_alloc_context3(codec);

    AVCodecContext *avcodec_alloc_context3(const AVCodec *codec)
    {
        AVCodecContext *avctx = av_malloc(sizeof(AVCodecContext));
        if (!avctx)
            return NULL;
        if (init_context_defaults(avctx, codec) < 0) {
            av_free(avctx);
            return NULL;
        }
        return avctx;
    }

    static int init_context_defaults(AVCodecContext *s, const AVCodec *codec)
    {
        int flags = 0;
        memset(s, 0, sizeof(AVCodecContext));

        s->av_class   = &av_codec_context_class;
        s->codec_type = codec ? codec->type : AVMEDIA_TYPE_UNKNOWN;
        if (codec) {
            s->codec    = codec;
            s->codec_id = codec->id;
        }

        if (s->codec_type == AVMEDIA_TYPE_AUDIO)
            flags = AV_OPT_FLAG_AUDIO_PARAM;
        else if (s->codec_type == AVMEDIA_TYPE_VIDEO)
            flags = AV_OPT_FLAG_VIDEO_PARAM;
        else if (s->codec_type == AVMEDIA_TYPE_SUBTITLE)
            flags = AV_OPT_FLAG_SUBTITLE_PARAM;
        av_opt_set_defaults2(s, flags, flags);

        s->time_base           = (AVRational){0,1};
        s->framerate           = (AVRational){ 0, 1 };
        s->pkt_timebase        = (AVRational){ 0, 1 };
        s->get_buffer2         = avcodec_default_get_buffer2;
        s->get_format          = avcodec_default_get_format;
        s->execute             = avcodec_default_execute;
        s->execute2            = avcodec_default_execute2;
        s->sample_aspect_ratio = (AVRational){0,1};
        s->pix_fmt             = AV_PIX_FMT_NONE;
        s->sw_pix_fmt          = AV_PIX_FMT_NONE;
        s->sample_fmt          = AV_SAMPLE_FMT_NONE;
        s->reordered_opaque    = AV_NOPTS_VALUE;

        if (codec && codec->priv_data_size) {
            if (!s->priv_data) {
                s->priv_data = av_mallocz(codec->priv_data_size);
                if (!s->priv_data)
                    return AVERROR(ENOMEM);
            }
            if (codec->priv_class) {
                *(const AVClass **)s->priv_data = codec->priv_class;
                av_opt_set_defaults(s->priv_data);
            }
        }
        if (codec && codec->defaults) {
            int ret;
            const AVCodecDefault *d = codec->defaults;
            while (d->key) {
                ret = av_opt_set(s, d->key, d->value, 0);
                av_assert0(ret >= 0);
                d++;
            }
        }
        return 0;
    }
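    Note how init_context_defaults() installs av_class on the context and priv_class on the codec's private data: that is what makes both of them AVOption targets. So after allocation you can override any default by name before opening the codec. A minimal sketch ("threads"/"auto" is a standard AVCodecContext option):

    AVCodecContext *ctx = avcodec_alloc_context3(codec);
    if (!ctx)
        return AVERROR(ENOMEM);
    /* override a default installed by init_context_defaults() */
    av_opt_set(ctx, "threads", "auto", 0);
    if (avcodec_open2(ctx, codec, NULL) < 0) {
        avcodec_free_context(&ctx);
        return -1;
    }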

    5. avcodec_open2(c, codec, NULL)

    This initializes a number of AVCodecContext members; most importantly, it initializes the decoder itself:

    ret = avctx->codec->init(avctx);

    // legacy decoder
    AVCodec ff_mpegvideo_decoder = {
        .name           = "mpegvideo",
        .long_name      = NULL_IF_CONFIG_SMALL("MPEG-1 video"),
        .type           = AVMEDIA_TYPE_VIDEO,
        .id             = AV_CODEC_ID_MPEG2VIDEO,
        .priv_data_size = sizeof(Mpeg1Context),
        .init           = mpeg_decode_init,
        .close          = mpeg_decode_end,
        .decode         = mpeg_decode_frame,
        .capabilities   = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
                          AV_CODEC_CAP_TRUNCATED | AV_CODEC_CAP_DELAY |
                          AV_CODEC_CAP_SLICE_THREADS,
        .caps_internal  = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
        .flush          = flush,
        .max_lowres     = 3,
    };

    That one is rather convoluted, so let's look at the H.264 decoder instead; the structure is exactly the same:

    AVCodec ff_h264_decoder = {
        .name                  = "h264",
        .long_name             = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
        .type                  = AVMEDIA_TYPE_VIDEO,
        .id                    = AV_CODEC_ID_H264,
        .priv_data_size        = sizeof(H264Context),
        .init                  = ff_h264_decode_init,
        .close                 = h264_decode_end,
        .decode                = h264_decode_frame,
        .capabilities          = /*AV_CODEC_CAP_DRAW_HORIZ_BAND |*/ AV_CODEC_CAP_DR1 |
                                 AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS |
                                 AV_CODEC_CAP_FRAME_THREADS,
        .caps_internal         = FF_CODEC_CAP_INIT_THREADSAFE,
        .flush                 = flush_dpb,
        .init_thread_copy      = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
        .update_thread_context = ONLY_IF_THREADS_ENABLED(ff_h264_update_thread_context),
        .profiles              = NULL_IF_CONFIG_SMALL(ff_h264_profiles),
        .priv_class            = &h264_class,
    };

    av_cold int ff_h264_decode_init(AVCodecContext *avctx)
    {
        H264Context *h = avctx->priv_data;
        int ret;

        ret = h264_init_context(avctx, h);
        if (ret < 0)
            return ret;

        ret = ff_thread_once(&h264_vlc_init, ff_h264_decode_init_vlc);
        if (ret != 0) {
            av_log(avctx, AV_LOG_ERROR, "pthread_once has failed.\n");
            return AVERROR_UNKNOWN;
        }

        if (avctx->ticks_per_frame == 1) {
            if (h->avctx->time_base.den < INT_MAX / 2) {
                h->avctx->time_base.den *= 2;
            } else
                h->avctx->time_base.num /= 2;
        }
        avctx->ticks_per_frame = 2;

        if (avctx->extradata_size > 0 && avctx->extradata) {
            ret = ff_h264_decode_extradata(avctx->extradata, avctx->extradata_size,
                                           &h->ps, &h->is_avc, &h->nal_length_size,
                                           avctx->err_recognition, avctx);
            if (ret < 0) {
                h264_decode_end(avctx);
                return ret;
            }
        }

        if (h->ps.sps && h->ps.sps->bitstream_restriction_flag &&
            h->avctx->has_b_frames < h->ps.sps->num_reorder_frames) {
            h->avctx->has_b_frames = h->ps.sps->num_reorder_frames;
        }

        avctx->internal->allocate_progress = 1;

        ff_h264_flush_change(h);

        if (h->enable_er < 0 && (avctx->active_thread_type & FF_THREAD_SLICE))
            h->enable_er = 0;
        if (h->enable_er && (avctx->active_thread_type & FF_THREAD_SLICE)) {
            av_log(avctx, AV_LOG_WARNING,
                   "Error resilience with slice threads is enabled. It is unsafe and unsupported and may crash. "
                   "Use it at your own risk\n");
        }

        return 0;
    }
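    ff_h264_decode_init() parses avctx->extradata, which for AVC-in-MP4 style streams carries the SPS/PPS. In a real player that field is filled in from the container before avcodec_open2() is called. Here is a sketch of the usual wiring with libavformat, reusing filename and the context c from the example (error handling mostly elided):

    #include <libavformat/avformat.h>

    AVFormatContext *fmt = NULL;
    if (avformat_open_input(&fmt, filename, NULL, NULL) < 0)
        return -1;
    avformat_find_stream_info(fmt, NULL);
    int vidx = av_find_best_stream(fmt, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
    /* copies extradata (plus width/height, pix_fmt, ...) into the context,
     * so the codec's init callback can parse SPS/PPS out of it */
    avcodec_parameters_to_context(c, fmt->streams[vidx]->codecpar);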

    // Initialize the decoder context. This is crucial: everything the decoder reads later comes from here.

    static int h264_init_context(AVCodecContext *avctx, H264Context *h)
    {
        int i;

        h->avctx                 = avctx;
        h->cur_chroma_format_idc = -1;
        h->picture_structure     = PICT_FRAME;
        h->workaround_bugs       = avctx->workaround_bugs;
        h->flags                 = avctx->flags;
        h->poc.prev_poc_msb      = 1 << 16;
        h->recovery_frame        = -1;
        h->frame_recovered       = 0;
        h->poc.prev_frame_num    = -1;
        h->sei.frame_packing.frame_packing_arrangement_cancel_flag = -1;
        h->sei.unregistered.x264_build = -1;

        h->next_outputed_poc = INT_MIN;
        for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
            h->last_pocs[i] = INT_MIN;

        ff_h264_sei_uninit(&h->sei);

        avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;

        h->nb_slice_ctx = (avctx->active_thread_type & FF_THREAD_SLICE) ? avctx->thread_count : 1;
        h->slice_ctx = av_mallocz_array(h->nb_slice_ctx, sizeof(*h->slice_ctx));
        if (!h->slice_ctx) {
            h->nb_slice_ctx = 0;
            return AVERROR(ENOMEM);
        }

        for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
            h->DPB[i].f = av_frame_alloc();
            if (!h->DPB[i].f)
                return AVERROR(ENOMEM);
        }

        h->cur_pic.f = av_frame_alloc();
        if (!h->cur_pic.f)
            return AVERROR(ENOMEM);

        h->last_pic_for_ec.f = av_frame_alloc();
        if (!h->last_pic_for_ec.f)
            return AVERROR(ENOMEM);

        for (i = 0; i < h->nb_slice_ctx; i++)
            h->slice_ctx[i].h264 = h;

        return 0;
    }

    6. Decoding:

    ret = avctx->codec->decode(avctx, picture, got_picture_ptr, &tmp);
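    avcodec_decode_video2(), which the example calls, is the old public entry point; since FFmpeg 3.1 the preferred API is avcodec_send_packet()/avcodec_receive_frame(), and both routes end up in codec->decode, i.e. h264_decode_frame() below. A minimal sketch of the newer loop (decode is a hypothetical wrapper name):

    /* sketch: the send/receive decode API (FFmpeg >= 3.1) */
    static int decode(AVCodecContext *ctx, AVFrame *frame, const AVPacket *pkt)
    {
        int ret = avcodec_send_packet(ctx, pkt);   /* pkt == NULL starts draining */
        if (ret < 0)
            return ret;
        while ((ret = avcodec_receive_frame(ctx, frame)) >= 0) {
            /* ... use frame; one packet may yield several frames ... */
            av_frame_unref(frame);
        }
        /* EAGAIN: feed more input; EOF: fully drained */
        return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
    }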

    static int h264_decode_frame(AVCodecContext *avctx, void *data,
                                 int *got_frame, AVPacket *avpkt)
    {
        const uint8_t *buf = avpkt->data;
        int buf_size       = avpkt->size;
        H264Context *h     = avctx->priv_data;
        AVFrame *pict      = data;
        int buf_index;
        int ret;

        h->flags = avctx->flags;
        h->setup_finished = 0;
        h->nb_slice_ctx_queued = 0;

        ff_h264_unref_picture(h, &h->last_pic_for_ec);

        /* end of stream, output what is still in the buffers */
        if (buf_size == 0)
            return send_next_delayed_frame(h, pict, got_frame, 0);

        if (h->is_avc && av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, NULL)) {
            int side_size;
            uint8_t *side = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA, &side_size);
            if (is_extra(side, side_size))
                ff_h264_decode_extradata(side, side_size,
                                         &h->ps, &h->is_avc, &h->nal_length_size,
                                         avctx->err_recognition, avctx);
        }
        if (h->is_avc && buf_size >= 9 && buf[0]==1 && buf[2]==0 && (buf[4]&0xFC)==0xFC && (buf[5]&0x1F) && buf[8]==0x67) {
            if (is_extra(buf, buf_size))
                return ff_h264_decode_extradata(buf, buf_size,
                                                &h->ps, &h->is_avc, &h->nal_length_size,
                                                avctx->err_recognition, avctx);
        }

        buf_index = decode_nal_units(h, buf, buf_size);
        if (buf_index < 0)
            return AVERROR_INVALIDDATA;

        if (!h->cur_pic_ptr && h->nal_unit_type == H264_NAL_END_SEQUENCE) {
            av_assert0(buf_index <= buf_size);
            return send_next_delayed_frame(h, pict, got_frame, buf_index);
        }

        if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) && (!h->cur_pic_ptr || !h->has_slice)) {
            if (avctx->skip_frame >= AVDISCARD_NONREF ||
                buf_size >= 4 && !memcmp("Q264", buf, 4))
                return buf_size;
            av_log(avctx, AV_LOG_ERROR, "no frame!\n");
            return AVERROR_INVALIDDATA;
        }

        if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) ||
            (h->mb_y >= h->mb_height && h->mb_height)) {
            if ((ret = ff_h264_field_end(h, &h->slice_ctx[0], 0)) < 0)
                return ret;
            /* Wait for second field. */
            if (h->next_output_pic) {
                ret = finalize_frame(h, pict, h->next_output_pic, got_frame);
                if (ret < 0)
                    return ret;
            }
        }

        av_assert0(pict->buf[0] || !*got_frame);
        ff_h264_unref_picture(h, &h->last_pic_for_ec);
        return get_consumed_bytes(buf_index, buf_size);
    }

    static int send_next_delayed_frame(H264Context *h, AVFrame *dst_frame,
                                       int *got_frame, int buf_index)
    {
        int ret, i, out_idx;
        H264Picture *out = h->delayed_pic[0];

        h->cur_pic_ptr = NULL;
        h->first_field = 0;

        out_idx = 0;
        for (i = 1;
             h->delayed_pic[i] &&
             !h->delayed_pic[i]->f->key_frame &&
             !h->delayed_pic[i]->mmco_reset;
             i++)
            if (h->delayed_pic[i]->poc < out->poc) {
                out     = h->delayed_pic[i];
                out_idx = i;
            }

        for (i = out_idx; h->delayed_pic[i]; i++)
            h->delayed_pic[i] = h->delayed_pic[i + 1];

        if (out) {
            out->reference &= ~DELAYED_PIC_REF;
            ret = finalize_frame(h, dst_frame, out, got_frame);
            if (ret < 0)
                return ret;
        }

        return buf_index;
    }
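    This is the draining path: it only runs when h264_decode_frame() sees buf_size == 0, which is what main()'s final empty packet produces, and what avcodec_send_packet(ctx, NULL) produces on the newer API. The delayed, reordered pictures then come out lowest POC first. A short usage sketch:

    /* sketch: flushing delayed pictures at end of stream */
    avcodec_send_packet(ctx, NULL);               /* empty packet -> buf_size == 0 path */
    while (avcodec_receive_frame(ctx, frame) >= 0) {
        /* reordered B-frame output arrives here */
        av_frame_unref(frame);
    }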

    static int finalize_frame(H264Context *h, AVFrame *dst, H264Picture *out, int *got_frame)
    {
        int ret;

        if (((h->avctx->flags & AV_CODEC_FLAG_OUTPUT_CORRUPT) ||
             (h->avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL) ||
             out->recovered)) {

            if (!h->avctx->hwaccel &&
                (out->field_poc[0] == INT_MAX ||
                 out->field_poc[1] == INT_MAX)
               ) {
                int p;
                AVFrame *f = out->f;
                int field = out->field_poc[0] == INT_MAX;
                uint8_t *dst_data[4];
                int linesizes[4];
                const uint8_t *src_data[4];

                av_log(h->avctx, AV_LOG_DEBUG, "Duplicating field %d to fill missing\n", field);

                for (p = 0; p < 4; p++) {
                    dst_data[p]  = f->data[p] + (field^1)*f->linesize[p];
                    src_data[p]  = f->data[p] +  field   *f->linesize[p];
                    linesizes[p] = 2*f->linesize[p];
                }

                av_image_copy(dst_data, linesizes, src_data, linesizes,
                              f->format, f->width, f->height>>1);
            }

            ret = output_frame(h, dst, out);
            if (ret < 0)
                return ret;

            *got_frame = 1;

            if (CONFIG_MPEGVIDEO) {
                ff_print_debug_info2(h->avctx, dst, NULL,
                                     out->mb_type,
                                     out->qscale_table,
                                     out->motion_val,
                                     NULL,
                                     h->mb_width, h->mb_height, h->mb_stride, 1);
            }
        }
        return 0;
    }

    static int output_frame(H264Context *h, AVFrame *dst, H264Picture *srcp)
    {
        AVFrame *src = srcp->f;
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(src->format);
        int i;
        int ret;

        if (src->format == AV_PIX_FMT_VIDEOTOOLBOX && src->buf[0]->size == 1)
            return AVERROR_EXTERNAL;

        ret = av_frame_ref(dst, src);
        if (ret < 0)
            return ret;

        av_dict_set(&dst->metadata, "stereo_mode", ff_h264_sei_stereo_mode(&h->sei.frame_packing), 0);

        if (srcp->sei_recovery_frame_cnt == 0)
            dst->key_frame = 1;

        if (!srcp->crop)
            return 0;

        for (i = 0; i < desc->nb_components; i++) {
            int hshift = (i > 0) ? desc->log2_chroma_w : 0;
            int vshift = (i > 0) ? desc->log2_chroma_h : 0;
            int off    = ((srcp->crop_left >> hshift) << h->pixel_shift) +
                          (srcp->crop_top  >> vshift) * dst->linesize[i];
            dst->data[i] += off;
        }
        return 0;
    }
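    Note that the crop loop only shifts the data pointers; no pixels are copied. As a concrete example of the arithmetic for yuv420p (log2_chroma_w = log2_chroma_h = 1, pixel_shift = 0) with crop_left = 4 and crop_top = 2, the loop amounts to:

    dst->data[0] += 4 + 2 * dst->linesize[0];               /* luma, full resolution */
    dst->data[1] += (4 >> 1) + (2 >> 1) * dst->linesize[1]; /* chroma planes are     */
    dst->data[2] += (4 >> 1) + (2 >> 1) * dst->linesize[2]; /* subsampled 2x2        */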

  • Original post: https://www.cnblogs.com/stnlcd/p/7149914.html