  • Month 11, Day 8: ffmpeg ffplay

    static int ffplay_video_thread(void *arg)
    {
        FFPlayer *ffp = arg;
        VideoState *is = ffp->is;
        AVFrame *frame = av_frame_alloc();
    ...
        for (;;) {
            ret = get_video_frame(ffp, frame);
    ...
                ret = queue_picture(ffp, frame, pts, duration, av_frame_get_pkt_pos(frame), is->viddec.pkt_serial);
                av_frame_unref(frame);
    #if CONFIG_AVFILTER
            }
    ...
    
        /* alloc or resize hardware picture buffer */
        if (!vp->bmp || !vp->allocated ||
            vp->width  != src_frame->width ||
            vp->height != src_frame->height ||
            vp->format != src_frame->format) {
    
            if (vp->width != src_frame->width || vp->height != src_frame->height)
                ffp_notify_msg3(ffp, FFP_MSG_VIDEO_SIZE_CHANGED, src_frame->width, src_frame->height);
    
            vp->allocated = 0;
            vp->width = src_frame->width;
            vp->height = src_frame->height;
            vp->format = src_frame->format;
    
            /* the allocation must be done in the main thread to avoid
               locking problems. */
            alloc_picture(ffp, src_frame->format);
    
            if (is->videoq.abort_request)
                return -1;
        }
    
        /* if the frame is not skipped, then display it */
        if (vp->bmp) {
            /* get a pointer on the bitmap */
            SDL_VoutLockYUVOverlay(vp->bmp);
    
    #ifdef FFP_MERGE
    #if CONFIG_AVFILTER
            // FIXME use direct rendering
            av_image_copy(data, linesize, (const uint8_t **)src_frame->data, src_frame->linesize,
                            src_frame->format, vp->width, vp->height);
    #else
            // sws_getCachedContext(...);
    #endif
    #endif
            // FIXME: set swscale options
            if (SDL_VoutFillFrameYUVOverlay(vp->bmp, src_frame) < 0) {
            av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
                exit(1);
            }
    
    ...
    
    static void alloc_picture(FFPlayer *ffp, int frame_format)
    {
        VideoState *is = ffp->is;
        Frame *vp;
    #ifdef FFP_MERGE
        int sdl_format;
    #endif
    
        vp = &is->pictq.queue[is->pictq.windex];
    
        free_picture(vp);
    
    #ifdef FFP_MERGE
        video_open(is, vp);
    #endif
    
        SDL_VoutSetOverlayFormat(ffp->vout, ffp->overlay_format);
        vp->bmp = SDL_Vout_CreateOverlay(vp->width, vp->height,
                                       frame_format,
                                       ffp->vout);
    ...
    
    int SDL_VoutFillFrameYUVOverlay(SDL_VoutOverlay *overlay, const AVFrame *frame)
    {
        if (!overlay || !overlay->func_fill_frame)
            return -1;
    
        return overlay->func_fill_frame(overlay, frame);
    }
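
    SDL_VoutFillFrameYUVOverlay just dispatches through a function pointer on the overlay; section 1 below shows the ffmpeg-backed func_fill_frame. For orientation, here is a trimmed sketch of the overlay object, reconstructed only from the fields these excerpts actually touch (the real declaration in ijksdl_vout.h has more members):

    /* sketch reconstructed from the fields used in this walkthrough;
       not the complete ijksdl_vout.h declaration */
    typedef struct SDL_VoutOverlay SDL_VoutOverlay;
    struct SDL_VoutOverlay {
        int     w, h;                          /* overlay->h drives plane heights below */
        Uint32  format;                        /* SDL_FCC_* four-character code */
        int     planes;                        /* set by overlay_fill() */
        Uint16  pitches[AV_NUM_DATA_POINTERS]; /* bytes per row, per plane */
        Uint8  *pixels[AV_NUM_DATA_POINTERS];  /* plane base pointers */

        int (*func_fill_frame)(SDL_VoutOverlay *overlay, const AVFrame *frame);
        /* ... lock/unlock/display hooks and private state ... */
    };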
    
    
    static int get_video_frame(FFPlayer *ffp, AVFrame *frame)
    {
        VideoState *is = ffp->is;
        int got_picture;
    
        ffp_video_statistic_l(ffp);
        if ((got_picture = decoder_decode_frame(ffp, &is->viddec, frame, NULL)) < 0)
            return -1;
    
    ...

    rindex (the FrameQueue read index)

    /* called to display each frame */
    static void video_refresh(FFPlayer *opaque, double *remaining_time)
    {
    ...
    display:
            /* display picture */
            if (!ffp->display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
                video_display2(ffp);
    ...
    
    /* display the current picture, if any */
    static void video_display2(FFPlayer *ffp)
    {
        VideoState *is = ffp->is;
        if (is->video_st)
            video_image_display2(ffp);
    }
    
    static void video_image_display2(FFPlayer *ffp)
    {
        VideoState *is = ffp->is;
        Frame *vp;
        Frame *sp = NULL;
    
        vp = frame_queue_peek_last(&is->pictq);
    
        int latest_seek_load_serial = __atomic_exchange_n(&(is->latest_seek_load_serial), -1, memory_order_seq_cst);
        if (latest_seek_load_serial == vp->serial)
            ffp->stat.latest_seek_load_duration = (av_gettime() - is->latest_seek_load_start_at) / 1000;
    
        if (vp->bmp) {
            if (is->subtitle_st) {
                if (frame_queue_nb_remaining(&is->subpq) > 0) {
                    sp = frame_queue_peek(&is->subpq);
    
                    if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
                        if (!sp->uploaded) {
                            if (sp->sub.num_rects > 0) {
                                char buffered_text[4096];
                                if (sp->sub.rects[0]->text) {
                                    strncpy(buffered_text, sp->sub.rects[0]->text, sizeof(buffered_text) - 1);
                                    buffered_text[sizeof(buffered_text) - 1] = '\0'; /* strncpy does not NUL-terminate on truncation */
                                }
                                else if (sp->sub.rects[0]->ass) {
                                    parse_ass_subtitle(sp->sub.rects[0]->ass, buffered_text);
                                }
                                ffp_notify_msg4(ffp, FFP_MSG_TIMED_TEXT, 0, 0, buffered_text, sizeof(buffered_text));
                            }
                            sp->uploaded = 1;
                        }
                    }
                }
            }
            SDL_VoutDisplayYUVOverlay(ffp->vout, vp->bmp);
            ffp->stat.vfps = SDL_SpeedSamplerAdd(&ffp->vfps_sampler, FFP_SHOW_VFPS_FFPLAY, "vfps[ffplay]");
            if (!ffp->first_video_frame_rendered) {
                ffp->first_video_frame_rendered = 1;
                ffp_notify_msg1(ffp, FFP_MSG_VIDEO_RENDERING_START);
            }
        }
    }
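
    parse_ass_subtitle above extracts displayable text from an ASS "Dialogue:" event, whose text field follows nine comma-separated metadata fields (Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect) in the classic layout. A minimal sketch of that idea, assuming the classic field layout; this is not ijkplayer's implementation, and real code must also strip {\...} override tags and map \N to line breaks:

    #include <stdio.h>   /* snprintf */

    static void parse_ass_text_sketch(const char *ass, char *out, size_t out_size)
    {
        int commas = 0;
        /* skip the nine metadata fields preceding the text */
        while (*ass && commas < 9)
            if (*ass++ == ',')
                commas++;
        snprintf(out, out_size, "%s", ass);
    }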
    
    static Frame *frame_queue_peek_last(FrameQueue *f)
    {
        return &f->queue[f->rindex];
    }
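
    For reference on rindex and rindex_shown: with keep_last enabled, the most recently displayed frame stays in the queue at rindex so it can be redrawn, and rindex_shown records that it has already been shown; frame_queue_peek() therefore looks one slot past it. The read-side helpers, paraphrased from stock ffplay.c (ijkplayer keeps the same scheme):

    static Frame *frame_queue_peek(FrameQueue *f)
    {
        /* the next frame to display sits one past the already-shown one */
        return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
    }

    static void frame_queue_next(FrameQueue *f)
    {
        if (f->keep_last && !f->rindex_shown) {
            f->rindex_shown = 1;   /* first advance only marks the last frame as shown */
            return;
        }
        frame_queue_unref_item(&f->queue[f->rindex]);
        if (++f->rindex == f->max_size)
            f->rindex = 0;
        SDL_LockMutex(f->mutex);
        f->size--;
        SDL_CondSignal(f->cond);
        SDL_UnlockMutex(f->mutex);
    }

    This is also why video_refresh() checks is->pictq.rindex_shown before calling video_display2(): until a first frame has been consumed, there is nothing at rindex worth redrawing.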

    1. Filling the overlay: func_fill_frame, overlay_fill, and the GLES upload

    static int func_fill_frame(SDL_VoutOverlay *overlay, const AVFrame *frame)
    {
        assert(overlay);
        SDL_VoutOverlay_Opaque *opaque = overlay->opaque;
        AVFrame swscale_dst_pic = { { 0 } };
    
        av_frame_unref(opaque->linked_frame);
    
        int need_swap_uv = 0;
        int use_linked_frame = 0;
        enum AVPixelFormat dst_format = AV_PIX_FMT_NONE;
        switch (overlay->format) {
            case SDL_FCC_YV12:
                need_swap_uv = 1;
                // no break;
            case SDL_FCC_I420:
                if (frame->format == AV_PIX_FMT_YUV420P || frame->format == AV_PIX_FMT_YUVJ420P) {
                    // ALOGE("direct draw frame");
                    use_linked_frame = 1;
                    dst_format = frame->format;
                } else {
                    // ALOGE("copy draw frame");
                    dst_format = AV_PIX_FMT_YUV420P;
                }
                break;
            case SDL_FCC_I444P10LE:
                if (frame->format == AV_PIX_FMT_YUV444P10LE) {
                    // ALOGE("direct draw frame");
                    use_linked_frame = 1;
                    dst_format = frame->format;
                } else {
                    // ALOGE("copy draw frame");
                    dst_format = AV_PIX_FMT_YUV444P10LE;
                }
                break;
            case SDL_FCC_RV32:
                dst_format = AV_PIX_FMT_0BGR32;
                break;
            case SDL_FCC_RV24:
                dst_format = AV_PIX_FMT_RGB24;
                break;
            case SDL_FCC_RV16:
                dst_format = AV_PIX_FMT_RGB565;
                break;
            default:
                ALOGE("SDL_VoutFFmpeg_ConvertPicture: unexpected overlay format %s(%d)",
                      (char*)&overlay->format, overlay->format);
                return -1;
        }
    
    
        // setup frame
        if (use_linked_frame) {
            // linked frame
            av_frame_ref(opaque->linked_frame, frame);
    
            overlay_fill(overlay, opaque->linked_frame, opaque->planes);
    
    ...
    
    static void overlay_fill(SDL_VoutOverlay *overlay, AVFrame *frame, int planes)
    {
        overlay->planes = planes;
    
        for (int i = 0; i < AV_NUM_DATA_POINTERS; ++i) {
            overlay->pixels[i] = frame->data[i];
            overlay->pitches[i] = frame->linesize[i];
        }
    }
    ...
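
    A note on need_swap_uv from func_fill_frame above: I420 lays the planes out as Y-U-V while YV12 is Y-V-U, so a YV12 overlay needs its two chroma planes exchanged after filling. A minimal sketch of that swap, assuming the pixels/pitches fields sketched earlier (FFSWAP is FFmpeg's macro; the exact placement inside the elided part of func_fill_frame may differ):

    /* sketch: fill as I420 first, then exchange U and V for YV12 output */
    if (need_swap_uv) {
        FFSWAP(Uint8 *, overlay->pixels[1],  overlay->pixels[2]);
        FFSWAP(Uint16,  overlay->pitches[1], overlay->pitches[2]);
    }

    The GLES uploader below handles the other end: because a YV12 overlay stores V before U, it re-indexes the planes (planes[1] = 2, planes[2] = 1) so the shader still receives U and V in the same texture units.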
    
    static GLboolean yuv420p_uploadTexture(IJK_GLES2_Renderer *renderer, SDL_VoutOverlay *overlay)
    {
        if (!renderer || !overlay)
            return GL_FALSE;
    
              int     planes[3]    = { 0, 1, 2 };
        const GLsizei widths[3]    = { overlay->pitches[0], overlay->pitches[1], overlay->pitches[2] };
        const GLsizei heights[3]   = { overlay->h,          overlay->h / 2,      overlay->h / 2 };
        const GLubyte *pixels[3]   = { overlay->pixels[0],  overlay->pixels[1],  overlay->pixels[2] };
    
        switch (overlay->format) {
            case SDL_FCC_I420:
                break;
            case SDL_FCC_YV12:
                planes[1] = 2;
                planes[2] = 1;
                break;
            default:
                ALOGE("[yuv420p] unexpected format %x
    ", overlay->format);
                return GL_FALSE;
        }
    
        for (int i = 0; i < 3; ++i) {
            int plane = planes[i];
    
            glBindTexture(GL_TEXTURE_2D, renderer->plane_textures[i]);
    
            glTexImage2D(GL_TEXTURE_2D,
                         0,
                         GL_LUMINANCE,
                         widths[plane],
                         heights[plane],
                         0,
                         GL_LUMINANCE,
                         GL_UNSIGNED_BYTE,
                         pixels[plane]);
        }
    
        return GL_TRUE;
    }
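
    Two details here are easy to miss. First, widths[] is taken from overlay->pitches, i.e. the row stride in bytes, which for padded/aligned frames can be larger than the visible width; renderers typically crop the padding away via texture coordinates. Second, the three GL_LUMINANCE textures are combined into RGB in the fragment shader. As a reference, a CPU-side sketch of the BT.601 limited-range math such a shader effectively evaluates per pixel; this is the standard fixed-point form, not code from ijkplayer:

    #include <libavutil/common.h>   /* av_clip_uint8() */

    /* standard BT.601 limited-range YUV -> RGB, fixed point (coefficients x256) */
    static void yuv601_to_rgb(uint8_t y, uint8_t u, uint8_t v,
                              uint8_t *r, uint8_t *g, uint8_t *b)
    {
        int c = y - 16, d = u - 128, e = v - 128;
        *r = av_clip_uint8((298 * c           + 409 * e + 128) >> 8);
        *g = av_clip_uint8((298 * c - 100 * d - 208 * e + 128) >> 8);
        *b = av_clip_uint8((298 * c + 516 * d           + 128) >> 8);
    }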

    http://blog.csdn.net/liujiakunit/article/details/46899229

    2. Opening the stream and decoding: stream_component_open → decoder_decode_frame

    /* open a given stream. Return 0 if OK */
    static int stream_component_open(FFPlayer *ffp, int stream_index)
    {
        VideoState *is = ffp->is;
        AVFormatContext *ic = is->ic;
        AVCodecContext *avctx;
        AVCodec *codec = NULL;
        const char *forced_codec_name = NULL;
        AVDictionary *opts = NULL;
        AVDictionaryEntry *t = NULL;
        int sample_rate, nb_channels;
        int64_t channel_layout;
        int ret = 0;
        int stream_lowres = ffp->lowres;
    
        if (stream_index < 0 || stream_index >= ic->nb_streams)
            return -1;
        avctx = avcodec_alloc_context3(NULL);
        if (!avctx)
            return AVERROR(ENOMEM);
    
        ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar);
        if (ret < 0)
            goto fail;
        av_codec_set_pkt_timebase(avctx, ic->streams[stream_index]->time_base);
    
        codec = avcodec_find_decoder(avctx->codec_id);
    ...
    
        case AVMEDIA_TYPE_VIDEO:
            is->video_stream = stream_index;
            is->video_st = ic->streams[stream_index];
    
            decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
    
    ...
    
    static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
        memset(d, 0, sizeof(Decoder));
        d->avctx = avctx;
        d->queue = queue;
        d->empty_queue_cond = empty_queue_cond;
        d->start_pts = AV_NOPTS_VALUE;
    
        d->first_frame_decoded_time = SDL_GetTickHR();
        d->first_frame_decoded = 0;
    
        SDL_ProfilerReset(&d->decode_profiler, -1);
    }
    static int get_video_frame(FFPlayer *ffp, AVFrame *frame)
    {
        VideoState *is = ffp->is;
        int got_picture;
    
        ffp_video_statistic_l(ffp);
        if ((got_picture = decoder_decode_frame(ffp, &is->viddec, frame, NULL)) < 0)
            return -1;
    
    ...
    
    
    static int decoder_decode_frame(FFPlayer *ffp, Decoder *d, AVFrame *frame, AVSubtitle *sub) {
        int got_frame = 0;
    
        do {
            int ret = -1;
    
            if (d->queue->abort_request)
                return -1;
    
            if (!d->packet_pending || d->queue->serial != d->pkt_serial) {
                AVPacket pkt;
                do {
                    if (d->queue->nb_packets == 0)
                        SDL_CondSignal(d->empty_queue_cond);
                    if (packet_queue_get_or_buffering(ffp, d->queue, &pkt, &d->pkt_serial, &d->finished) < 0)
                        return -1;
                    if (pkt.data == flush_pkt.data) {
                        avcodec_flush_buffers(d->avctx);
                        d->finished = 0;
                        d->next_pts = d->start_pts;
                        d->next_pts_tb = d->start_pts_tb;
                    }
                } while (pkt.data == flush_pkt.data || d->queue->serial != d->pkt_serial);
                av_packet_unref(&d->pkt);
                d->pkt_temp = d->pkt = pkt;
                d->packet_pending = 1;
            }
    
            switch (d->avctx->codec_type) {
                case AVMEDIA_TYPE_VIDEO: {
                    ret = avcodec_decode_video2(d->avctx, frame, &got_frame, &d->pkt_temp);
                    if (got_frame) {
                        ffp->stat.vdps = SDL_SpeedSamplerAdd(&ffp->vdps_sampler, FFP_SHOW_VDPS_AVCODEC, "vdps[avcodec]");
                        if (ffp->decoder_reorder_pts == -1) {
                            frame->pts = av_frame_get_best_effort_timestamp(frame);
                        } else if (!ffp->decoder_reorder_pts) {
                            frame->pts = frame->pkt_dts;
                        }
                    }
                    }
                    break;
                case AVMEDIA_TYPE_AUDIO:
                    ret = avcodec_decode_audio4(d->avctx, frame, &got_frame, &d->pkt_temp);
                    if (got_frame) {
                        AVRational tb = (AVRational){1, frame->sample_rate};
                        if (frame->pts != AV_NOPTS_VALUE)
                            frame->pts = av_rescale_q(frame->pts, av_codec_get_pkt_timebase(d->avctx), tb);
                        else if (d->next_pts != AV_NOPTS_VALUE)
                            frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
                        if (frame->pts != AV_NOPTS_VALUE) {
                            d->next_pts = frame->pts + frame->nb_samples;
                            d->next_pts_tb = tb;
                        }
                    }
                    break;
                case AVMEDIA_TYPE_SUBTITLE:
                    ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, &d->pkt_temp);
                    break;
                default:
                    break;
            }
    
            if (ret < 0) {
                d->packet_pending = 0;
            } else {
                d->pkt_temp.dts =
                d->pkt_temp.pts = AV_NOPTS_VALUE;
                if (d->pkt_temp.data) {
                    if (d->avctx->codec_type != AVMEDIA_TYPE_AUDIO)
                        ret = d->pkt_temp.size;
                    d->pkt_temp.data += ret;
                    d->pkt_temp.size -= ret;
                    if (d->pkt_temp.size <= 0)
                        d->packet_pending = 0;
                } else {
                    if (!got_frame) {
                        d->packet_pending = 0;
                        d->finished = d->pkt_serial;
                    }
                }
            }
        } while (!got_frame && !d->finished);
    
        return got_frame;
    }
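
    The serial bookkeeping above (d->pkt_serial vs. d->queue->serial, and d->finished = d->pkt_serial on end of stream) is ffplay's seek-invalidation scheme: every flush packet bumps the queue's serial, so frames decoded from pre-seek packets can be recognized and discarded. The producer side, paraphrased from ffplay.c's packet_queue_put_private():

    static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
    {
        MyAVPacketList *pkt1;

        if (q->abort_request)
            return -1;

        pkt1 = av_malloc(sizeof(MyAVPacketList));
        if (!pkt1)
            return -1;
        pkt1->pkt = *pkt;
        pkt1->next = NULL;
        if (pkt == &flush_pkt)
            q->serial++;              /* a flush (seek) starts a new packet generation */
        pkt1->serial = q->serial;     /* stamp the packet with the current generation */

        if (!q->last_pkt)
            q->first_pkt = pkt1;
        else
            q->last_pkt->next = pkt1;
        q->last_pkt = pkt1;
        q->nb_packets++;
        q->size += pkt1->pkt.size + sizeof(*pkt1);
        SDL_CondSignal(q->cond);
        return 0;
    }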

    3. The audio path: sdl_audio_callback and audio_decode_frame

    /* prepare a new audio buffer */
    static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
    {
        FFPlayer *ffp = opaque;
        VideoState *is = ffp->is;
        int audio_size, len1;
        if (!ffp || !is) {
            memset(stream, 0, len);
            return;
        }
    
        ffp->audio_callback_time = av_gettime_relative();
    
        if (ffp->pf_playback_rate_changed) {
            ffp->pf_playback_rate_changed = 0;
    #if defined(__ANDROID__)
            if (!ffp->soundtouch_enable) {
                SDL_AoutSetPlaybackRate(ffp->aout, ffp->pf_playback_rate);
            }
    #else
            SDL_AoutSetPlaybackRate(ffp->aout, ffp->pf_playback_rate);
    #endif
        }
        if (ffp->pf_playback_volume_changed) {
            ffp->pf_playback_volume_changed = 0;
            SDL_AoutSetPlaybackVolume(ffp->aout, ffp->pf_playback_volume);
        }
    
        while (len > 0) {
            if (is->audio_buf_index >= is->audio_buf_size) {
               audio_size = audio_decode_frame(ffp);
    ...
    
               if (audio_size < 0) {
                    /* if error, just output silence */
                   is->audio_buf = NULL;
                   is->audio_buf_size = SDL_AUDIO_MIN_BUFFER_SIZE / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
               } else {
                   if (is->show_mode != SHOW_MODE_VIDEO)
                       update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
                   is->audio_buf_size = audio_size;
               }
               is->audio_buf_index = 0;
            }
            if (is->auddec.pkt_serial != is->audioq.serial) {
                is->audio_buf_index = is->audio_buf_size;
                memset(stream, 0, len);
                // stream += len;
                // len = 0;
                SDL_AoutFlushAudio(ffp->aout);
                break;
            }
            len1 = is->audio_buf_size - is->audio_buf_index;
            if (len1 > len)
                len1 = len;
            if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
                memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
            else {
                memset(stream, 0, len1);
                if (!is->muted && is->audio_buf)
                    SDL_MixAudio(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1, is->audio_volume);
            }
            len -= len1;
            stream += len1;
            is->audio_buf_index += len1;
        }
        is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
        /* Let's assume the audio driver that is used by SDL has two periods. */
        if (!isnan(is->audio_clock)) {
            set_clock_at(&is->audclk, is->audio_clock - (double)(is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec - SDL_AoutGetLatencySeconds(ffp->aout), is->audio_clock_serial, ffp->audio_callback_time / 1000000.0);
            sync_clock_to_slave(&is->extclk, &is->audclk);
        }
    }
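
    The set_clock_at() call above back-dates the audio clock by the data decoded but not yet played: is->audio_clock is the pts at the end of the last decoded buffer, so subtracting the unconsumed bytes and the output latency yields the sample currently audible. A worked example with hypothetical numbers, assuming 44100 Hz stereo S16 so that bytes_per_sec = 44100 * 2 * 2 = 176400:

    /* audio_clock          = 10.000 s   (pts at end of last decoded buffer)
     * audio_write_buf_size = 8820 bytes (decoded but not yet handed to the device)
     * device latency       = 0.0 s      (for simplicity)
     * => currently audible pts = 10.000 - 8820 / 176400.0 - 0.0 = 9.950 s */
    double audible_pts = 10.000 - 8820 / 176400.0 - 0.0;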
    
    static int audio_decode_frame(FFPlayer *ffp)
    {
        VideoState *is = ffp->is;
        int data_size, resampled_data_size;
        int64_t dec_channel_layout;
        av_unused double audio_clock0;
        int wanted_nb_samples;
        Frame *af;
        int translate_time = 1;
    
        if (is->paused || is->step)
            return -1;
    
        if (ffp->sync_av_start &&                       /* sync enabled */
            is->video_st &&                             /* has video stream */
            !is->viddec.first_frame_decoded &&          /* not hot */
            is->viddec.finished != is->videoq.serial) { /* not finished */
            /* waiting for first video frame */
            Uint64 now = SDL_GetTickHR();
            if (now < is->viddec.first_frame_decoded_time ||
                now > is->viddec.first_frame_decoded_time + 2000) {
                is->viddec.first_frame_decoded = 1;
            } else {
                /* video pipeline is not ready yet */
                return -1;
            }
        }
    reload:
        do {
    #if defined(_WIN32) || defined(__APPLE__)
            while (frame_queue_nb_remaining(&is->sampq) == 0) {
                if ((av_gettime_relative() - ffp->audio_callback_time) > 1000000LL * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec / 2)
                    return -1;
                av_usleep (1000);
            }
    #endif
            if (!(af = frame_queue_peek_readable(&is->sampq)))
                return -1;
            frame_queue_next(&is->sampq);
        } while (af->serial != is->audioq.serial);
    ...
    
        if (is->swr_ctx) {
            const uint8_t **in = (const uint8_t **)af->frame->extended_data;
            uint8_t **out = &is->audio_buf1;
            int out_count = (int)((int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256);
            int out_size  = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
            int len2;
            if (out_size < 0) {
                av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed
    ");
                return -1;
            }
            if (wanted_nb_samples != af->frame->nb_samples) {
                if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
                                            wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
                    av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed
    ");
                    return -1;
                }
            }
            av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
    
            if (!is->audio_buf1)
                return AVERROR(ENOMEM);
            len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
            if (len2 < 0) {
                av_log(NULL, AV_LOG_ERROR, "swr_convert() failed
    ");
                return -1;
            }
            if (len2 == out_count) {
            av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
                if (swr_init(is->swr_ctx) < 0)
                    swr_free(&is->swr_ctx);
            }
            is->audio_buf = is->audio_buf1;
            int bytes_per_sample = av_get_bytes_per_sample(is->audio_tgt.fmt);
            resampled_data_size = len2 * is->audio_tgt.channels * bytes_per_sample;
    #if defined(__ANDROID__)
            if (ffp->soundtouch_enable && ffp->pf_playback_rate != 1.0f && !is->abort_request) {
                av_fast_malloc(&is->audio_new_buf, &is->audio_new_buf_size, out_size * translate_time);
                for (int i = 0; i < (resampled_data_size / 2); i++)
                {
                    is->audio_new_buf[i] = (is->audio_buf1[i * 2] | (is->audio_buf1[i * 2 + 1] << 8));
                }
    
                int ret_len = ijk_soundtouch_translate(is->handle, is->audio_new_buf, (float)(ffp->pf_playback_rate), (float)(1.0f/ffp->pf_playback_rate),
                        resampled_data_size / 2, bytes_per_sample, is->audio_tgt.channels, af->frame->sample_rate);
                if (ret_len > 0) {
                    is->audio_buf = (uint8_t*)is->audio_new_buf;
                    resampled_data_size = ret_len;
                } else {
                    translate_time++;
                    goto reload;
                }
            }
    #endif
        } else {
            is->audio_buf = af->frame->data[0];
            resampled_data_size = data_size;
        }
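
    For context, the swr_ctx consulted above is created lazily in the elided part of audio_decode_frame (as in stock ffplay) whenever the decoded frame's sample format, channel layout, or rate differs from audio_tgt. A paraphrased sketch using the same-era swresample API:

    /* paraphrased from ffplay.c: rebuild the resampler when the source changes */
    swr_free(&is->swr_ctx);
    is->swr_ctx = swr_alloc_set_opts(NULL,
                                     is->audio_tgt.channel_layout, is->audio_tgt.fmt,
                                     is->audio_tgt.freq,
                                     dec_channel_layout, af->frame->format,
                                     af->frame->sample_rate,
                                     0, NULL);
    if (!is->swr_ctx || swr_init(is->swr_ctx) < 0)
        return -1;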
  • Original: https://www.cnblogs.com/javastart/p/7300377.html