  • ffplay source code analysis 03 ---- video decoding thread

    
    

    =====================================================

    ffplay source code analysis 01 ---- framework

    ffplay source code analysis 02 ---- data reading thread

    ffplay source code analysis 03 ---- video decoding thread

    ffplay source code analysis 03 ---- audio decoding thread

    ffplay source code analysis 04 ---- audio output

    ffplay source code analysis 05 ---- audio resampling

    ffplay source code analysis 06 ---- video output

    ffplay source code analysis 07 ---- audio/video synchronization

    =====================================================


    Opening a stream: stream_component_open()
    /* open a given stream. Return 0 if OK */
    /**
     * @brief stream_component_open
     * @param is
     * @param stream_index stream index
     * @return Return 0 if OK
     */
    static int stream_component_open(VideoState *is, int stream_index)
    {
        AVFormatContext *ic = is->ic;
        AVCodecContext *avctx;
        AVCodec *codec;
        const char *forced_codec_name = NULL;
        AVDictionary *opts = NULL;
        AVDictionaryEntry *t = NULL;
        int sample_rate, nb_channels;
        int64_t channel_layout;
        int ret = 0;
        int stream_lowres = lowres;
    
        if (stream_index < 0 || stream_index >= ic->nb_streams)
            return -1;
        /* allocate a codec context for the decoder */
        avctx = avcodec_alloc_context3(NULL);
        if (!avctx)
            return AVERROR(ENOMEM);
        /* copy the codec parameters from the stream into the newly allocated codec context */
        ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar);
        if (ret < 0)
            goto fail;
        // set pkt_timebase from the stream's time_base
        avctx->pkt_timebase = ic->streams[stream_index]->time_base;
    
        /* find the decoder for the stream's codec_id */
        codec = avcodec_find_decoder(avctx->codec_id);
    
        switch(avctx->codec_type){
            case AVMEDIA_TYPE_AUDIO   : is->last_audio_stream    = stream_index;
                forced_codec_name =    audio_codec_name; break;
            case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index;
                forced_codec_name = subtitle_codec_name; break;
            case AVMEDIA_TYPE_VIDEO   : is->last_video_stream    = stream_index;
                forced_codec_name =    video_codec_name; break;
        }
        if (forced_codec_name)
            codec = avcodec_find_decoder_by_name(forced_codec_name);
        if (!codec) {
            if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
                                          "No codec could be found with name '%s'\n", forced_codec_name);
            else                   av_log(NULL, AV_LOG_WARNING,
                                          "No decoder could be found for codec %s\n", avcodec_get_name(avctx->codec_id));
            ret = AVERROR(EINVAL);
            goto fail;
        }
    
        avctx->codec_id = codec->id;
        if (stream_lowres > codec->max_lowres) {
            av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
                    codec->max_lowres);
            stream_lowres = codec->max_lowres;
        }
        avctx->lowres = stream_lowres;
    
        if (fast)
            avctx->flags2 |= AV_CODEC_FLAG2_FAST;
    
        opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
        if (!av_dict_get(opts, "threads", NULL, 0))
            av_dict_set(&opts, "threads", "auto", 0);
        if (stream_lowres)
            av_dict_set_int(&opts, "lowres", stream_lowres, 0);
        if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
            av_dict_set(&opts, "refcounted_frames", "1", 0);
        if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
            goto fail;
        }
        if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
            av_log(NULL, AV_LOG_ERROR, "Option %s not found.
    ", t->key);
            ret =  AVERROR_OPTION_NOT_FOUND;
            goto fail;
        }
    
        is->eof = 0;
        ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
        switch (avctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
    #if CONFIG_AVFILTER
            {
                AVFilterContext *sink;
    
                is->audio_filter_src.freq           = avctx->sample_rate;
                is->audio_filter_src.channels       = avctx->channels;
                is->audio_filter_src.channel_layout = get_valid_channel_layout(avctx->channel_layout, avctx->channels);
                is->audio_filter_src.fmt            = avctx->sample_fmt;
                if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
                    goto fail;
                sink = is->out_audio_filter;
                sample_rate    = av_buffersink_get_sample_rate(sink);
                nb_channels    = av_buffersink_get_channels(sink);
                channel_layout = av_buffersink_get_channel_layout(sink);
            }
    #else
            sample_rate    = avctx->sample_rate;
            nb_channels    = avctx->channels;
            channel_layout = avctx->channel_layout;
    #endif
    
            /* prepare audio output */
            if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
                goto fail;
            is->audio_hw_buf_size = ret;
            is->audio_src = is->audio_tgt;
            is->audio_buf_size  = 0;
            is->audio_buf_index = 0;
    
            /* init averaging filter, used when audio is not the master clock */
            is->audio_diff_avg_coef  = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
            is->audio_diff_avg_count = 0;
            /* since we do not have a precise enough measure of the audio FIFO fullness,
               audio sync is only corrected when the error exceeds this threshold */
            is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
    
            is->audio_stream = stream_index;           // index of the audio stream
            is->audio_st = ic->streams[stream_index];  // pointer to the audio stream
            // initialize ffplay's audio decoder wrapper
            decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread);
            if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek) {
                is->auddec.start_pts = is->audio_st->start_time;
                is->auddec.start_pts_tb = is->audio_st->time_base;
            }
            // start the audio decoding thread
            if ((ret = decoder_start(&is->auddec, audio_thread, "audio_decoder", is)) < 0)
                goto out;
            SDL_PauseAudioDevice(audio_dev, 0);
            break;
        case AVMEDIA_TYPE_VIDEO:
            is->video_stream = stream_index;           // index of the video stream
            is->video_st = ic->streams[stream_index];  // pointer to the video stream
            // initialize ffplay's video decoder wrapper
            decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
            // start the video decoding thread
            if ((ret = decoder_start(&is->viddec, video_thread, "video_decoder", is)) < 0)
                goto out;
            is->queue_attachments_req = 1; // request the attached picture (cover art) of mp3/aac files etc.
            break;
        case AVMEDIA_TYPE_SUBTITLE: // handled with the same logic as video
            is->subtitle_stream = stream_index;
            is->subtitle_st = ic->streams[stream_index];
    
            decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread);
            if ((ret = decoder_start(&is->subdec, subtitle_thread, "subtitle_decoder", is)) < 0)
                goto out;
            break;
        default:
            break;
        }
        goto out;
    
    fail:
        avcodec_free_context(&avctx);
    out:
        av_dict_free(&opts);
    
        return ret;
    }
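
    For context, stream_component_open() is called from the data reading thread once av_find_best_stream() has selected the streams to play. A minimal sketch of that call site, following ffplay's naming (st_index[] holds the selected stream indices; error handling omitted, so treat the details as an assumption):

    /* sketch: inside read_thread(), after stream selection */
    if (st_index[AVMEDIA_TYPE_AUDIO] >= 0)
        stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);        // opens audio and starts audio_thread
    if (st_index[AVMEDIA_TYPE_VIDEO] >= 0)
        ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);  // opens video and starts video_thread
    if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0)
        stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);     // opens subtitles and starts subtitle_thread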

    Starting the decoder thread: decoder_start()

    /**
     * Create the decoding thread; audio/video/subtitle each have an independent thread.
     */
    static int decoder_start(Decoder *d, int (*fn)(void *), const char *thread_name, void* arg)
    {
        packet_queue_start(d->queue);   // start the corresponding packet queue
        d->decoder_tid = SDL_CreateThread(fn, thread_name, arg);    // create the decoding thread
        if (!d->decoder_tid) {
            av_log(NULL, AV_LOG_ERROR, "SDL_CreateThread(): %s\n", SDL_GetError());
            return AVERROR(ENOMEM);
        }
        return 0;
    }
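
    decoder_start() assumes the Decoder was already prepared by decoder_init(), which stream_component_open() calls just before it. For reference, decoder_init() essentially records the codec context, the packet queue to consume and the condition variable used to wake the read thread; a sketch reconstructed from ffplay (treat the exact fields as an approximation):

    static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond)
    {
        memset(d, 0, sizeof(Decoder));
        d->avctx = avctx;                        // codec context opened by avcodec_open2()
        d->queue = queue;                        // packet queue this decoder consumes
        d->empty_queue_cond = empty_queue_cond;  // signalled when the queue runs empty, wakes read_thread
        d->start_pts = AV_NOPTS_VALUE;
        d->pkt_serial = -1;                      // no packet received yet
    }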

    Starting the packet queue: packet_queue_start()

    static void packet_queue_start(PacketQueue *q)
    {
        SDL_LockMutex(q->mutex);
        q->abort_request = 0;
        packet_queue_put_private(q, &flush_pkt); // a flush_pkt is inserted here
        SDL_UnlockMutex(q->mutex);
    }

    Inserting a packet into the queue: packet_queue_put_private()

    static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
    {
        MyAVPacketList *pkt1;
    
        if (q->abort_request)   // if the queue has been aborted, the put fails
            return -1;

        pkt1 = av_malloc(sizeof(MyAVPacketList));   // allocate a list node
        if (!pkt1)  // out of memory, the put fails
            return -1;
        // No extra reference is taken here, which also means av_read_frame does not free the buffer on the user's behalf.
        pkt1->pkt = *pkt; // copy the AVPacket (shallow copy: AVPacket.data etc. are not duplicated)
        pkt1->next = NULL;
        if (pkt == &flush_pkt)// if a flush_pkt is being put, bump the queue's serial to separate two discontinuous segments of data
        {
            q->serial++;
            printf("q->serial = %d\n", q->serial);
        }
        pkt1->serial = q->serial;   // tag the node with the queue's serial
        /* Queue operation: if last_pkt is NULL the queue is empty, so the new node becomes the head;
         * otherwise append the new node after the current tail. Either way the tail then points to the new node.
         */
        if (!q->last_pkt)
            q->first_pkt = pkt1;
        else
            q->last_pkt->next = pkt1;
        q->last_pkt = pkt1;
    
        // Queue bookkeeping: update node count, cached size and cached total duration, used to bound the queue size
        q->nb_packets++;
        q->size += pkt1->pkt.size + sizeof(*pkt1);
        q->duration += pkt1->pkt.duration;
    
        /* XXX: should duplicate packet data in DV case */
        // signal that the queue now has data, so a thread waiting in packet_queue_get can fetch it
        SDL_CondSignal(q->cond);
        return 0;
    }
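
    The dequeue counterpart is packet_queue_get(), which decoder_decode_frame() uses below. A sketch of what it looks like in this version of ffplay (reconstructed, so treat the details as an approximation): it pops the head node under the mutex, reverses the same bookkeeping, hands back the node's serial, and in blocking mode waits on q->cond until data arrives or abort is requested.

    /* returns < 0 if aborted, 0 if no packet (non-blocking) and > 0 if a packet was returned */
    static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
    {
        MyAVPacketList *pkt1;
        int ret;

        SDL_LockMutex(q->mutex);
        for (;;) {
            if (q->abort_request) {         // queue aborted: tell the caller to quit
                ret = -1;
                break;
            }
            pkt1 = q->first_pkt;
            if (pkt1) {                     // pop the head node
                q->first_pkt = pkt1->next;
                if (!q->first_pkt)
                    q->last_pkt = NULL;
                q->nb_packets--;
                q->size -= pkt1->pkt.size + sizeof(*pkt1);
                q->duration -= pkt1->pkt.duration;
                *pkt = pkt1->pkt;
                if (serial)
                    *serial = pkt1->serial; // hand the node's serial back to the decoder
                av_free(pkt1);
                ret = 1;
                break;
            } else if (!block) {            // non-blocking: report "no packet"
                ret = 0;
                break;
            } else {
                SDL_CondWait(q->cond, q->mutex); // wait for packet_queue_put_private() to signal
            }
        }
        SDL_UnlockMutex(q->mutex);
        return ret;
    }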

    flush_pkt is used to mark the boundary between discontinuous segments of the stream. It is put into the queue in two places (see the sketch after this list):
    1. when the packet queue is started (queue initialization);
    2. on seek, where inserting it flushes the previously buffered data.
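
    For reference, flush_pkt in this version of ffplay is a special static AVPacket whose data pointer points at itself, which is how packet_queue_put_private() and decoder_decode_frame() recognize it. A sketch of both uses (init_flush_pkt() and flush_video_queue_after_seek() are hypothetical wrappers; the statements inside follow ffplay):

    static AVPacket flush_pkt;

    /* done once at program start (ffplay does this in main()) */
    static void init_flush_pkt(void)
    {
        av_init_packet(&flush_pkt);
        flush_pkt.data = (uint8_t *)&flush_pkt;        // any unique non-NULL pointer works as the marker
    }

    /* in read_thread(), after a successful avformat_seek_file() */
    static void flush_video_queue_after_seek(VideoState *is)
    {
        if (is->video_stream >= 0) {
            packet_queue_flush(&is->videoq);           // drop everything queued before the seek
            packet_queue_put(&is->videoq, &flush_pkt); // bumps the serial; the decoder then flushes its buffers
        }
    }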

    Video decoding thread: video_thread()

    // video decoding thread
    static int video_thread(void *arg)
    {
        VideoState *is = arg;
        AVFrame *frame = av_frame_alloc();  // allocate the decoded frame
        double pts;                 // pts in seconds
        double duration;            // frame duration in seconds
        int ret;
        // 1. get the stream timebase
        AVRational tb = is->video_st->time_base;
        // 2. get the frame rate, so each picture's duration can be computed
        AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
    
        if (!frame)
            return AVERROR(ENOMEM);
    
        for (;;) {  // loop: fetch decoded video frames
            // 3. get a decoded video frame
            ret = get_video_frame(is, frame);
            if (ret < 0)
                goto the_end;   // decoding has ended, exit the loop
            if (!ret)           // no decoded picture was obtained this round, try again
                continue;
            // 4. compute the frame duration and convert pts to seconds
            // duration = 1/frame_rate in seconds; 0 when the frame rate is unknown
            duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
            // convert pts to seconds using the AVStream timebase
            pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
            // 5. push the decoded frame into the frame queue
            ret = queue_picture(is, frame, pts, duration, frame->pkt_pos, is->viddec.pkt_serial);
            // 6. release the data referenced by frame
            av_frame_unref(frame);
            if (ret < 0) // a negative return value exits the thread
                goto the_end;
        }
        }
     the_end:
        av_frame_free(&frame);
        return 0;
    }
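
    As a concrete example of the conversion in step 4 (made-up numbers): for a 25 fps stream whose time_base is 1/90000, every frame lasts 1/25 = 0.04 s, and a frame with pts 180000 is shown at the 2.0 s mark.

    /* hypothetical values, only to illustrate the math in video_thread() */
    AVRational frame_rate = (AVRational){25, 1};        // 25 fps
    AVRational tb         = (AVRational){1, 90000};     // 90 kHz stream time_base
    double duration = av_q2d((AVRational){frame_rate.den, frame_rate.num}); // 1/25  = 0.04 s
    double pts      = 180000 * av_q2d(tb);                                  // 180000/90000 = 2.0 s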

    Getting a video frame: get_video_frame()

    /**
     * @brief get a decoded video frame
     * @param is
     * @param frame points to the returned video frame
     * @return
     */
    static int get_video_frame(VideoState *is, AVFrame *frame)
    {
        int got_picture;
        // 1. get a decoded video frame
        if ((got_picture = decoder_decode_frame(&is->viddec, frame, NULL)) < 0) {
            return -1; // -1 means the decoding thread should exit, so see decoder_decode_frame for when it returns -1
        }
    
        if (got_picture) {
            // 2. decide whether this frame should be dropped; the goal is to drop stale frames before they enter the frame queue
            double dpts = NAN;
    
            if (frame->pts != AV_NOPTS_VALUE)
                dpts = av_q2d(is->video_st->time_base) * frame->pts;    // pts in seconds
    
            frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
    
            if (framedrop>0 || // frame dropping is explicitly enabled
                    (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER))// or video is not the master clock
            {
                if (frame->pts != AV_NOPTS_VALUE) { // pts is valid
                    double diff = dpts - get_master_clock(is);
                    if (!isnan(diff) &&     // the difference is valid
                            fabs(diff) < AV_NOSYNC_THRESHOLD && // and within the range where sync is still possible
                            diff - is->frame_last_filter_delay < 0 && // the frame is already late, even accounting for the filter delay
                            is->viddec.pkt_serial == is->vidclk.serial && // the packet belongs to the current playback serial
                            is->videoq.nb_packets) { // and the packet queue still has data
                        is->frame_drops_early++;
                        printf("%s(%d) diff:%lfs, drop frame, drops:%d\n",
                               __FUNCTION__, __LINE__, diff, is->frame_drops_early);
                        av_frame_unref(frame);
                        got_picture = 0;
                    }
                }
            }
        }
    
        return got_picture;
    }

    decoder_decode_frame()

    static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
        int ret = AVERROR(EAGAIN);
    
        for (;;) {
            AVPacket pkt;
            // 1. while the stream is continuous, pull decoded frames from the decoder
            if (d->queue->serial == d->pkt_serial) { // 1.1 only if the packets belong to the current playback serial
                do {
                    if (d->queue->abort_request)
                        return -1;  // exit was requested
                    // 1.2 receive a decoded frame
                    switch (d->avctx->codec_type) {
                        case AVMEDIA_TYPE_VIDEO:
                            ret = avcodec_receive_frame(d->avctx, frame);
                            //printf("frame pts:%ld, dts:%ld\n", frame->pts, frame->pkt_dts);
                            if (ret >= 0) {
                                if (decoder_reorder_pts == -1) {
                                    frame->pts = frame->best_effort_timestamp;
                                } else if (!decoder_reorder_pts) {
                                    frame->pts = frame->pkt_dts;
                                }
                            }
                            break;
                        case AVMEDIA_TYPE_AUDIO:
                            ret = avcodec_receive_frame(d->avctx, frame);
                            if (ret >= 0) {
                                AVRational tb = (AVRational){1, frame->sample_rate};
                                if (frame->pts != AV_NOPTS_VALUE)
                                    frame->pts = av_rescale_q(frame->pts, d->avctx->pkt_timebase, tb);
                                else if (d->next_pts != AV_NOPTS_VALUE)
                                    frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
                                if (frame->pts != AV_NOPTS_VALUE) {
                                    d->next_pts = frame->pts + frame->nb_samples;
                                    d->next_pts_tb = tb;
                                }
                            }
                            break;
                    }
    
                    // 1.3 check whether decoding has finished; return 0 when it has
                    if (ret == AVERROR_EOF) {
                        d->finished = d->pkt_serial;
                        printf("avcodec_flush_buffers %s(%d)\n", __FUNCTION__, __LINE__);
                        avcodec_flush_buffers(d->avctx);
                        return 0;
                    }
                    // 1.4 a frame was decoded successfully, return 1
                    if (ret >= 0)
                        return 1;
                } while (ret != AVERROR(EAGAIN));   // 1.5 EAGAIN means no frame is available yet and more packets must be fed
            }
    
            // 2. get a packet; if the playback serial does not match (the data is discontinuous), filter out "stale" packets
            do {
                // 2.1 if there is nothing to read, wake up read_thread via the continue_read_thread SDL_cond
                if (d->queue->nb_packets == 0)  // nothing to read
                    SDL_CondSignal(d->empty_queue_cond);// ask read_thread to put in more packets
                // 2.2 if a packet is still pending, reuse it
                if (d->packet_pending) {
                    av_packet_move_ref(&pkt, &d->pkt);
                    d->packet_pending = 0;
                } else {
                    // 2.3 blocking read of a packet
                    if (packet_queue_get(d->queue, &pkt, 1, &d->pkt_serial) < 0)
                        return -1;
                }
                if(d->queue->serial != d->pkt_serial) {
                    // debug print added by the author darren (not in upstream ffplay)
                    printf("%s(%d) discontinue:queue->serial:%d,pkt_serial:%d\n",
                           __FUNCTION__, __LINE__, d->queue->serial, d->pkt_serial);
                    av_packet_unref(&pkt); // fixme? release the packet that is being filtered out
                }
            } while (d->queue->serial != d->pkt_serial);// keep reading while the packet is not from the current playback serial (stream discontinuity)
    
            // 3. feed the packet to the decoder
            if (pkt.data == flush_pkt.data) {
                // when seeking or when switching to a different stream
                avcodec_flush_buffers(d->avctx); // drop the frames buffered inside the decoder
                d->finished = 0;        // reset to 0
                d->next_pts = d->start_pts;     // mainly used for audio
                d->next_pts_tb = d->start_pts_tb;// mainly used for audio
            } else {
                if (d->avctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
                    int got_frame = 0;
                    ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, &pkt);
                    if (ret < 0) {
                        ret = AVERROR(EAGAIN);
                    } else {
                        if (got_frame && !pkt.data) {
                           d->packet_pending = 1;
                           av_packet_move_ref(&d->pkt, &pkt);
                        }
                        ret = got_frame ? 0 : (pkt.data ? AVERROR(EAGAIN) : AVERROR_EOF);
                    }
                } else {
                    if (avcodec_send_packet(d->avctx, &pkt) == AVERROR(EAGAIN)) {
                        av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
                        d->packet_pending = 1;
                        av_packet_move_ref(&d->pkt, &pkt);
                    }
                }
                av_packet_unref(&pkt);    // the caller must release the packet data itself
            }
        }
    }

    The meaning of packet_pending: we mainly need to look at this piece of code:

    if (avcodec_send_packet(d->avctx, &pkt) == AVERROR(EAGAIN)) {
        av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
        d->packet_pending = 1;
        av_packet_move_ref(&d->pkt, &pkt);
    }

    Let's look at the FFmpeg source:

    int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
    {
        AVCodecInternal *avci = avctx->internal;
        int ret;
    
        if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
            return AVERROR(EINVAL);
    
        if (avctx->internal->draining)
            return AVERROR_EOF;
    
        if (avpkt && !avpkt->size && avpkt->data)
            return AVERROR(EINVAL);
    
        av_packet_unref(avci->buffer_pkt);
        if (avpkt && (avpkt->data || avpkt->side_data_elems)) {
            ret = av_packet_ref(avci->buffer_pkt, avpkt);
            if (ret < 0)
                return ret;
        }
    
        ret = av_bsf_send_packet(avci->filter.bsfs[0], avci->buffer_pkt);
        if (ret < 0) {
            av_packet_unref(avci->buffer_pkt);
            return ret;
        }
    
        if (!avci->buffer_frame->buf[0]) {
            ret = decode_receive_frame_internal(avctx, avci->buffer_frame);
            if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
                return ret;
        }
    
        return 0;
    }

    int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt)
    {
        int ret;
    
        if (!pkt || (!pkt->data && !pkt->side_data_elems)) {
            ctx->internal->eof = 1;
            return 0;
        }
    
        if (ctx->internal->eof) {
            av_log(ctx, AV_LOG_ERROR, "A non-NULL packet sent after an EOF.\n");
            return AVERROR(EINVAL);
        }
    
        if (ctx->internal->buffer_pkt->data ||
            ctx->internal->buffer_pkt->side_data_elems)
            return AVERROR(EAGAIN);
    
        ret = av_packet_make_refcounted(pkt);
        if (ret < 0)
            return ret;
        av_packet_move_ref(ctx->internal->buffer_pkt, pkt);
    
        return 0;
    }

    As the source shows, AVERROR(EAGAIN) here means there is still buffered data and the packet was not actually accepted, so the ffplay code above is easy to understand: it simply caches the packet (packet_pending / d->pkt) and re-sends it on the next iteration.
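
    To make this contract explicit, here is a minimal, generic send/receive sketch (not ffplay's code; decode_one_packet() is a hypothetical helper): AVERROR(EAGAIN) from avcodec_send_packet() means the packet was not consumed and must be retried after the buffered frames have been drained with avcodec_receive_frame(), which is exactly what packet_pending / d->pkt implement.

    /* Hypothetical helper illustrating the avcodec send/receive contract. */
    static int decode_one_packet(AVCodecContext *avctx, AVPacket *pkt, AVFrame *frame, int *pkt_consumed)
    {
        int ret = avcodec_send_packet(avctx, pkt);
        if (ret == AVERROR(EAGAIN)) {
            *pkt_consumed = 0;           // decoder input is full: keep pkt and re-send it later
        } else if (ret < 0) {
            return ret;                  // real error (or sending after EOF)
        } else {
            *pkt_consumed = 1;           // packet accepted, the caller may unreference it
        }

        for (;;) {
            ret = avcodec_receive_frame(avctx, frame);
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                return 0;                // need more input, or the decoder is fully drained
            if (ret < 0)
                return ret;              // decoding error
            /* ... consume the frame here ... */
            av_frame_unref(frame);
        }
    }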

  • Original article: https://www.cnblogs.com/vczf/p/14095531.html