  • FFMPEG SDK Development Introduction (Original)

    Source: http://blog.sina.com.cn/s/blog_62a8419a01016exv.html

    This article is based on the author's hands-on experience developing with the ffmpeg SDK; it is shared here for learning and exchange.

    FFMPEG SDK Development Introduction

    1. Introduction:
        ffmpeg is an open-source program that can record and convert digital audio and video, and turn them into streams.
    With ffmpeg you can parse, demux, decode, filter (preprocess), encode, mux, stream, and play media.

    2. Download and build:

        Download: http://ffmpeg.org/download.html

        Build:
           1) Windows: static library / shared library; toolchain: MinGW gcc, or cross-compile on a Linux host (recommended)
           2) Linux:   static library / shared library; toolchain: gcc

        Modules:
            libavcodec    - encoders and decoders
            libavdevice   - input/output device support
            libavfilter   - audio/video filters
            libavformat   - container format muxing/demuxing
            libavutil     - utility library
            libpostproc   - post-processing
            libswscale    - image color-space and size conversion
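
        A sketch of the public headers these libraries expose (the include paths may differ depending on how the SDK was built and installed):

            extern "C" {
            #include <libavcodec/avcodec.h>      // libavcodec
            #include <libavdevice/avdevice.h>    // libavdevice
            #include <libavfilter/avfilter.h>    // libavfilter
            #include <libavformat/avformat.h>    // libavformat
            #include <libavutil/avutil.h>        // libavutil
            #include <libpostproc/postprocess.h> // libpostproc
            #include <libswscale/swscale.h>      // libswscale
            }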
        
    3. SDK overview and development (based on the ffmpeg 0.8 SDK)
        Every part of ffmpeg is implemented as a plugin behind a uniform calling interface, which makes it very easy to use and to extend.
        Plugins come in several kinds: muxer, demuxer, protocol, hwaccel, encoder, decoder, parser, bitstream, filter, ...
        The first step when using the SDK is therefore to register the plugins:
        
        avcodec_register_all()  : registers hwaccels, encoders, decoders, parsers, bitstream filters
        av_register_all()       : registers muxers, demuxers, protocols
        avfilter_register_all() : registers filters
        
        The main code samples for the different scenarios follow (they are fragments only and may not compile as-is):
        
        1) Getting information about a media file (Parser):
        // See the V3 code: interface IFileDecoder, media/impl/filedecoderimpl.cpp
        
        {
            av_register_all();
            AVFormatContext * pFormatCtx = NULL;
            int err = 0;
            const char *fileName = "c:\\test.mp4";
            err = av_open_input_file(&pFormatCtx, fileName, NULL, 0, NULL);
            if(err != 0)
            {
                // break ;
            }
            err = av_find_stream_info(pFormatCtx);
            if(err < 0)
            {
                // break ;
            }
            for(uint32_t i = 0; i < pFormatCtx->nb_streams; i ++)
            {
                // per-stream data
                AVStream *pStream = pFormatCtx->streams[i];
                // frame rate
                AVRational frameRate = pStream->r_frame_rate;
                // time base (the unit of this stream's timestamps)
                AVRational timeBase = pStream->time_base;
                // stream duration
                int64_t duration = pStream->duration;
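
                // (sketch) convert to human-friendly units with av_q2d(), which turns an
                // AVRational into a double (assumes duration != AV_NOPTS_VALUE)
                double fps         = av_q2d(frameRate);
                double durationSec = duration * av_q2d(timeBase);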
                
                // codec context for this stream
                AVCodecContext *pCodecCtx = pStream->codec;
                AVMediaType codecType = pCodecCtx->codec_type;
               
                CodecID codecId = pCodecCtx->codec_id;
               
                
                if(codecType == AVMEDIA_TYPE_VIDEO)
                {
                    // basic video parameters
                    int width = pCodecCtx->width;
                    int height = pCodecCtx->height;
                    PixelFormat pixelFormat = pCodecCtx->pix_fmt;
                }
                else if(codecType == AVMEDIA_TYPE_AUDIO)
                {
                    // basic audio parameters
                    int channels = pCodecCtx->channels;
                    int sample_rate = pCodecCtx->sample_rate;
                    AVSampleFormat sampleFmt = pCodecCtx->sample_fmt;
                }
            }
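
            // (sketch) optionally dump the parsed container/stream information to stderr;
            // av_dump_format() is available in this generation of the SDK
            av_dump_format(pFormatCtx, 0, fileName, 0);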
            // clean up
            if(pFormatCtx != NULL)
            {
                av_close_input_file(pFormatCtx);
                pFormatCtx = NULL;
            }    
        }
        
        2) Reading sample data (raw samples, without decoding)
        // See the V3 code: interface IFileDecoder, media/impl/filedecoderimpl.cpp

        {
            // see the Parser code above
            // av_register_all();
            // AVFormatContext * pFormatCtx = NULL;
            // err = av_open_input_file(&pFormatCtx, fileName, NULL, 0, NULL);
        
            AVPacket packet;
            av_init_packet(&packet);
            int ret = av_read_frame(pFormatCtx, &packet);
            if(ret >= 0)
            {
                int streamIndex = packet.stream_index;
                AVStream *pStream = pFormatCtx->streams[streamIndex];
                AVCodecContext *pCodecCtx = pStream->codec;
                // compute timestamps

                // convert timestamps to units of 1/1000000 second (microseconds)
                AVRational time_base;
                time_base.num = 1;
                time_base.den = 1000000;
                
                // e.g. 25 fps -> time base 1/25,  29.97 fps -> 1001/30000
                
                // dts/pts rescaled to microseconds
                const int64_t dts = av_rescale_q(packet.dts, pStream->time_base, time_base);
                const int64_t pts = av_rescale_q(packet.pts, pStream->time_base, time_base);
                uint8_t *data = packet.data;
                int size = packet.size;
                bool isKey = ((packet.flags & AV_PKT_FLAG_KEY) == AV_PKT_FLAG_KEY);    
            }
            av_free_packet(&packet);        
        }
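
        In practice av_read_frame() is called in a loop until it returns a negative value at end of file or on error, releasing every packet after use (a sketch, not the original V3 code):

        {
            AVPacket pkt;
            av_init_packet(&pkt);
            while(av_read_frame(pFormatCtx, &pkt) >= 0)
            {
                // ... inspect pkt.stream_index / pkt.dts / pkt.pts / pkt.data / pkt.size as above ...
                av_free_packet(&pkt);
            }
        }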
        
        3) Decoding samples (video ES => YUV/RGB, audio ES => PCM)
        // See the V3 code: interface IVideoDecoder/IAudioDecoder, media/impl/videodecoderimpl.cpp and audiodecoderimpl.cpp
        {
            // see the Parser and raw-sample code above

            // AVMediaType codecType = pCodecCtx->codec_type;
            AVMediaType codecType = AVMEDIA_TYPE_VIDEO;
            // CodecID codecId = pCodecCtx->codec_id;
            CodecID codecId = CODEC_ID_H264;
            
            // look up the decoder by codec ID
            AVCodec *pCodec = avcodec_find_decoder(codecId);
            // allocate the codec context
            AVCodecContext *pCodecCtx = avcodec_alloc_context();

            // set the required fields
            pCodecCtx->codec_type = codecType; // AVMEDIA_TYPE_VIDEO or AVMEDIA_TYPE_AUDIO
            pCodecCtx->codec_id   = codecId;

            if(pCodec->capabilities & CODEC_CAP_TRUNCATED)
                pCodecCtx->flags |= CODEC_FLAG_TRUNCATED;

            // opening a codec must be serialized with a lock; opening several codecs concurrently can fail otherwise
            gMutexFFmpeg.lock();        
            // open the codec
            avcodec_open(pCodecCtx, pCodec);        
            gMutexFFmpeg.unlock();
            
            if(codecType == AVMEDIA_TYPE_VIDEO)
            {
                AVFrame *pSrcFrame = avcodec_alloc_frame();
                AVFrame *pDstFrame = avcodec_alloc_frame();
                
                // the input buffer must be over-allocated by FF_INPUT_BUFFER_PADDING_SIZE bytes, because the decoder may read past its end
                uint8_t *data = ...;
                int size = ...;
        
                while(size > 0)
                {
                    AVPacket pkt;
                    av_init_packet(&pkt);
                    pkt.data  = data;
                    pkt.size  = size;

                    int frameFinished = 0;
                    int bytesDecoded = avcodec_decode_video2(pCodecCtx, pSrcFrame, &frameFinished, &pkt);
                    if(bytesDecoded > 0)
                    {
                        data += bytesDecoded;
                        size -= bytesDecoded;
                    }
                    if(frameFinished)
                    {
                        int numBytes = avpicture_get_size(pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);
                        uint8_t *pBuffer = new uint8_t[numBytes];
                        avpicture_fill((AVPicture *)pDstFrame, pBuffer, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);
                        av_picture_copy((AVPicture *)pDstFrame, (AVPicture *)pSrcFrame, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);
                        
                        // pBuffer / numBytes / pCodecCtx->pix_fmt : the YUV/RGB data
                        delete []pBuffer;                    
                    }
                    
                    if(bytesDecoded < 0)
                        break ;
                }
                av_free(pSrcFrame);
                av_free(pDstFrame);
            }
            else if(codecType == AVMEDIA_TYPE_AUDIO)
            {
                // allocate the output buffer for decoded audio
                uint8_t *pBuffer = new uint8_t[AVCODEC_MAX_AUDIO_FRAME_SIZE];
        
                // the input buffer must be over-allocated by FF_INPUT_BUFFER_PADDING_SIZE bytes, because the decoder may read past its end
                uint8_t *data = ...;
                int size = ...;
        
                while(size > 0)
                {
                    AVPacket pkt;
                    av_init_packet(&pkt);
                    pkt.data  = data;
                    pkt.size  = size;
                    
                    int outSize = AVCODEC_MAX_AUDIO_FRAME_SIZE;
                    int bytesDecoded = avcodec_decode_audio3(pCodecCtx, (int16_t *)pBuffer, &outSize, &pkt);
                    if(bytesDecoded > 0)
                    {
                        data += bytesDecoded;
                        size -= bytesDecoded;
                    }
                    if((bytesDecoded >= 0) && (outSize > 0))
                    {
                        // pBuffer / outSize : the PCM data
                        // format:
                        // pCodecCtx->channels;
                        // pCodecCtx->sample_fmt;
                        // pCodecCtx->sample_rate;
                    }
                    if(bytesDecoded < 0)
                        break ;
                }
            }
            
            gMutexFFmpeg.lock();        
            // close and free
            avcodec_close(pCodecCtx);
            gMutexFFmpeg.unlock();
            av_free(pCodecCtx);
        }
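
        If the codec reports CODEC_CAP_DELAY, frames may still be buffered inside the decoder after the last packet has been consumed. A sketch of draining them by feeding empty packets (assumes pCodecCtx and pSrcFrame from the video branch above):

        {
            AVPacket flushPkt;
            av_init_packet(&flushPkt);
            flushPkt.data = NULL;
            flushPkt.size = 0;
            int frameFinished = 0;
            do
            {
                avcodec_decode_video2(pCodecCtx, pSrcFrame, &frameFinished, &flushPkt);
                // every iteration with frameFinished != 0 yields one more decoded frame,
                // which can be copied out exactly as shown above
            } while(frameFinished);
        }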
        
        4) Audio/video encoding (YUV/RGB => video ES, PCM => audio ES)
        // See the V3 code: media/videoencoder.cpp and audioencoder.cpp
        {
            // video encode
            avcodec_register_all();
            // look up the encoder
            AVCodec *avCodec = avcodec_find_encoder((CodecID)mConfig.codec);
            AVCodecContext *codecCtx = avcodec_alloc_context();
            codecCtx->codec_type    = AVMEDIA_TYPE_VIDEO;
            codecCtx->codec_id      = (CodecID)mConfig.codec;
            codecCtx->width         = mOutFormat.width;
            codecCtx->height        = mOutFormat.height;
            codecCtx->pix_fmt       = (PixelFormat)mOutFormat.pixelFormat;

            uint32 num = 0;
            uint32 den = 0;
            SampleUtil::FPS2Timescale(mOutFormat.frameRate, num, den);
            codecCtx->time_base.num = num;
            codecCtx->time_base.den = den;        
            codecCtx->bit_rate      = mConfig.bitRate*1000;
            codecCtx->max_b_frames  = 0;
            codecCtx->gop_size      = 100;
            codecCtx->mb_decision   = FF_MB_DECISION_RD;
            
            avcodec_open(codecCtx, avCodec);
            // allocate 1 MB for the encoded output buffer
            mOutputBuffer.resize(1*1024*1024);
            
            AVFrame *pSrcFrame = avcodec_alloc_frame();
            
            avcodec_get_frame_defaults(pSrcFrame);
            int ret = avpicture_fill((AVPicture *)pSrcFrame, (uint8_t *)inData.data, (PixelFormat)mOutFormat.pixelFormat, mOutFormat.width, mOutFormat.height);

            AVRational time_base;
            time_base.num = 1;
            time_base.den = 1000000;
            pSrcFrame->pts = av_rescale_q(inData.dts, time_base, codecCtx->time_base);
            
            int bytesWritten = avcodec_encode_video(codecCtx, (uint8 *)mOutputBuffer.data(), mOutputBuffer.size(),
                isEmpty ? NULL : pSrcFrame);

            outData.data  = (char *)mOutputBuffer.data();
            outData.size  = bytesWritten;
            outData.isKey = (codecCtx->coded_frame->key_frame != 0);
        
        
            av_free(pSrcFrame);
            avcodec_close(codecCtx);
            av_free(codecCtx);
            
            
            // for audio encoding see audioencoder.cpp
        }
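
        SampleUtil::FPS2Timescale above is a V3 helper that turns a frame rate into a rational time base (e.g. 25 -> 1/25, 29.97 -> 1001/30000). A hypothetical equivalent built on libavutil could look like this (FPS2TimescaleSketch is not the original implementation; needs <cmath> for fabs/floor):

        static void FPS2TimescaleSketch(double fps, uint32_t &num, uint32_t &den)
        {
            // hypothetical helper, not the V3 SampleUtil implementation
            // NTSC-style rates are conventionally written as x*1000/1001 (29.97 -> 1001/30000)
            const double upper = floor(fps) + 1.0;
            if (fabs(fps * 1001.0 - upper * 1000.0) < 1.0)
            {
                num = 1001;
                den = (uint32_t)(upper * 1000.0);
            }
            else
            {
                AVRational r = av_d2q(1.0 / fps, 1 << 24);  // approximate 1/fps as a rational
                num = (uint32_t)r.num;
                den = (uint32_t)r.den;
            }
        }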
        
        5) Image format conversion (YUV/RGB <=> YUV/RGB, plus resizing)
        // See the code: media/imageconverter.cpp
        {
            SwsContext *pSwsCtx = NULL;
            
           
            
            // scaling algorithm
            int swsFlags  = SWS_LANCZOS; // SWS_FAST_BILINEAR;
            // initialize the conversion context
            pSwsCtx = sws_getCachedContext(NULL, srcWidth, srcHeight, srcFmt,
                dstWidth, dstHeight, dstFmt, swsFlags, NULL, NULL, NULL);
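
            // (sketch) pSrcBuffer is assumed to be supplied by the caller; the destination
            // buffer can be sized from the requested output format and dimensions
            const int dstBytes = avpicture_get_size(dstFmt, dstWidth, dstHeight);
            uint8_t *pDstBuffer = (uint8_t *)av_malloc(dstBytes);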
            
            // wrap the raw buffers in AVPicture structures
            AVPicture avSrcPic;
            AVPicture avDstPic;
            memset(&avSrcPic, 0, sizeof(avSrcPic));
            memset(&avDstPic, 0, sizeof(avDstPic));
            int dstRet = avpicture_fill(&avDstPic, (uint8_t *)pDstBuffer, dstFmt, dstWidth, dstHeight);
          
          {
            // pSrcBuffer - source data
            // pDstBuffer - destination data
            int srcRet = avpicture_fill(&avSrcPic, (uint8_t *)pSrcBuffer, srcFmt, srcWidth, srcHeight);

            // perform the conversion
            sws_scale(pSwsCtx, avSrcPic.data, avSrcPic.linesize, 0, abs(srcHeight), avDstPic.data, avDstPic.linesize);
           }
           
            // release
            sws_freeContext(pSwsCtx);
        }
        
        6) Muxing (container formats: .mp4 / .avi / .mkv ...)
        // See the code: interface IFileWriter, media/impl/filewriterimpl.cpp
        {
            av_register_all();

            AVFormatContext * pFormatCtx;
            avformat_alloc_output_context2(&pFormatCtx, NULL, "mp4", "c:\\out.mp4");
            
            {
                // new video stream
                AVStream * avStream = av_new_stream(pFormatCtx, pFormatCtx->nb_streams);
                avcodec_get_context_defaults3(avStream->codec, NULL);

                AVCodecContext *codecCtx = avStream->codec;
                codecCtx->codec_id       = (CodecID)format->codecId;
                codecCtx->codec_type     = AVMEDIA_TYPE_VIDEO;
                codecCtx->width          = format->width;
                codecCtx->height         = format->height;
                codecCtx->bit_rate       = 800000;
                uint32 num = 0;
                uint32 den = 0;
                SampleUtil::FPS2Timescale(format->frameRate, num, den);
                codecCtx->time_base.num  = num;
                codecCtx->time_base.den  = den;
                av_set_pts_info(avStream, 64, num, den);
                
                if(pFormatCtx->oformat->flags & AVFMT_GLOBALHEADER)
                {
                    codecCtx->flags |= CODEC_FLAG_GLOBAL_HEADER;
                }
                switch(codecCtx->codec_id)
                {
                case CODEC_ID_H264:
                    {
                        AVBitStreamFilterContext * avFilter = av_bitstream_filter_init("h264_mp4toannexb");
                    }
                    break ;
                case CODEC_ID_AAC:
                    {
                        codecCtx->frame_size = 1024;
                        AVBitStreamFilterContext * avFilter = av_bitstream_filter_init("aac_adtstoasc");
                    }
                    break ;
                }
                // set the codec extradata; e.g. for H.264 this is the SPS & PPS
                codecCtx->extradata_size = ;// size;
                codecCtx->extradata      = ;// (uint8_t *)av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
            }
            {
                // new stream
                AVStream * avStream = av_new_stream(pFormatCtx, pFormatCtx->nb_streams);
                avcodec_get_context_defaults3(avStream->codec, NULL);
            }
            
            int err = av_set_parameters(pFormatCtx, NULL);
            // open the output file for writing
            err = avio_open(&pFormatCtx->pb, "c:\\out.mp4", AVIO_FLAG_WRITE);

            // write the file header
            err = av_write_header(pFormatCtx);
            
            {
                const AVRational in_time_base = { 1, 1000000 };
                AVRational out_time_base = avStream->time_base;

                AVPacket pkt = { 0 };
                av_init_packet(&pkt);
                
                pkt.stream_index = streamId; // stream index
                pkt.data  = ;//(uint8_t *)mediaSample->data();
                pkt.size  = ;//mediaSample->size();
                // convert dts/pts from units of 1/1000000 to avStream->time_base
                pkt.dts   = av_rescale_q(mediaSample->dts(), in_time_base, out_time_base);
                pkt.pts   = av_rescale_q(mediaSample->pts(), in_time_base, out_time_base);
                pkt.flags = mediaSample->isKey() ? AV_PKT_FLAG_KEY : 0;

                // write one frame
                int err = av_interleaved_write_frame(pFormatCtx, &pkt);

                av_free_packet(&pkt);            
            }
                    
            // write the file trailer
            av_write_trailer(pFormatCtx);
            
            // clean up
            // av_bitstream_filter_close(avFilter);
            avio_close(pFormatCtx->pb);
            avformat_free_context(pFormatCtx);
        }
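
        The bitstream filter contexts created above (h264_mp4toannexb / aac_adtstoasc) are applied per packet with av_bitstream_filter_filter(); a sketch with the old-style API (assumes avFilter, codecCtx and pkt from the code above):

        {
            uint8_t *outData = NULL;
            int outSize = 0;
            int r = av_bitstream_filter_filter(avFilter, codecCtx, NULL,
                                               &outData, &outSize,
                                               pkt.data, pkt.size,
                                               (pkt.flags & AV_PKT_FLAG_KEY) != 0);
            if(r > 0)
            {
                // the filter allocated a new buffer: write outData/outSize instead of
                // pkt.data/pkt.size, then free it
                av_free(outData);
            }
        }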
        
        7) Using filters (crop, resize, deinterlace, drawtext, overlay, vflip, ...)
        By chaining a number of filters you can apply a whole series of processing steps to the audio/video.
            
         a).Simple filtergraphs:
         
             reencode filter graph:
             _________                        __________              ______________
            |         |                      |          |            |              |
            | decoded |  simple filtergraph  | filtered |  encoder   | encoded data |
            | frames  | -------------------> | frames   | ---------> | packets      |
            |_________|                      |__________|            |______________|
            

            filter graph:
             _______        _____________        _______        _____        ________
            |       |      |             |      |       |      |     |      |        |
            | input | ---> | deinterlace | ---> | scale | ---> | fps | ---> | output |
            |_______|      |_____________|      |_______|      |_____|      |________|
            
            
            In this generation of the SDK a video filter implements the pad callbacks
            request_frame, start_frame, draw_slice and end_frame; filtered frames are
            pulled from the sink filter with av_vsink_buffer_get_video_buffer_ref(), e.g.:

                    int ret = av_vsink_buffer_get_video_buffer_ref(mBufferDstCtx, &picRef, 0);
            
            
        b).Complex filtergraphs:
             _________
            |         |
            | input 0 |\                    __________
            |_________| \                  |          |
                         \   _________    /| output 0 |
                          \ |         |  / |__________|
             _________     \| complex | /
            |         |     |         |/
            | input 1 |---->| filter  |\
            |_________|     |         | \   __________
                           /| graph   |  \ |          |
                          / |         |   \| output 1 |
             _________   /  |_________|    |__________|
            |         | /
            | input 2 |/
            |_________|
            
        
        Code example from the V3 component implementing interface IVideoPreprocess:
        Source file: media/impl/videopreprocessimpl.cpp
        
        The filter graph we build:

            input --> deinterlace --> fps --> logo remove --> color --> image overlays --> crop --> resize
                  --> (0: pass through | 1: pad | 2: crop) --> output
                                                                                                 
            {
                avcodec_register_all();
                avfilter_register_all();
                
                AVFilterGraph * pFilterGraph = NULL;
                AVFilterContext * pBufferSrcCtx = NULL;
                AVFilterContext * pBufferDstCtx = NULL;
                
                AVFrame * pSrcFrame   = avcodec_alloc_frame();
                AVFrame * pSinkFrame  = avcodec_alloc_frame();
                AVFrame * pDstFrame   = avcodec_alloc_frame();

                // list of supported output pixel formats; we only support PIX_FMT_YUV420P
                PixelFormat pix_fmts[] = { PIX_FMT_YUV420P, PIX_FMT_NONE };
                char args[512];
                
                AVFilterContext *lastFilterCtx = NULL;
                
                // the filters we use; names starting with "nl_" are filters we wrote ourselves
                // input buffer (source) filter
                AVFilter *bufferFilter     = avfilter_get_by_name("buffer");
                // deinterlace filter; currently the yadif filter
                AVFilter *yadifFilter      = avfilter_get_by_name("yadif");
                // our own frame-rate conversion filter
                AVFilter *fpsFilter        = avfilter_get_by_name("nl_fps");
                // our own logo-masking filter; supports multiple regions, dynamic settings and time ranges
                AVFilter *delogosFilter    = avfilter_get_by_name("nl_delogos");
                // our own contrast/brightness adjustment filter
                AVFilter *colorFilter      = avfilter_get_by_name("nl_color");
                // our own image overlay filter; supports multiple overlays, dynamic settings and time ranges
                AVFilter *overlaysFilter   = avfilter_get_by_name("nl_overlays");
                // crop filter
                AVFilter *cropFilter       = avfilter_get_by_name("crop");
                // resize filter
                AVFilter *resizeFilter     = avfilter_get_by_name("scale");
                // padding filter; fills the image borders with a given color
                AVFilter *padFilter        = avfilter_get_by_name("pad");
                // output buffer (sink) filter
                AVFilter *buffersinkFilter = avfilter_get_by_name("buffersink");

                // create the graph
                pFilterGraph = avfilter_graph_alloc();

                // create the filters
                
                AVRational tb  = { 1, 1000000 };
                AVRational sar = { 0, 1 };
                // compute the image aspect ratio
                av_reduce(&sar.num, &sar.den, mConfig.width, mConfig.height, 1000*1000);

                // parameters for the buffer (source) filter:
                // w:h:pixfmt:time_base.num:time_base.den:sample_aspect_ratio.num:sample_aspect_ratio.den:sws_param
                sprintf(args, "%d:%d:%d:%d:%d:%d:%d",
                    mConfig.width, mConfig.height, mConfig.pixelFormat, tb.num, tb.den, sar.num, sar.den);

                // input filter
                err = avfilter_graph_create_filter(&pBufferSrcCtx,  bufferFilter,  "in", args, NULL, pFilterGraph);
                // remember the previous filter context
                lastFilterCtx = pBufferSrcCtx;
            
                // if deinterlacing is required, create the yadif filter and link it to the previous filter
                // deinterlace : yadif
                if(mConfig.deinterlace > 0)
                {
                    if(yadifFilter == NULL)
                        break ;

                    // yadif filter parameters
                    // mode:parity
                    sprintf(args, "%d:%d", 0, -1);

                    // create the filter and add it to the graph
                    AVFilterContext *deinterlaceCtx = NULL;
                    err = avfilter_graph_create_filter(&deinterlaceCtx,  yadifFilter, "yadif", args, NULL, pFilterGraph);
                    if(err < 0)
                        break ;

                    // link to the previous filter
                    err = avfilter_link(lastFilterCtx, 0, deinterlaceCtx, 0);
                    if(err < 0)
                        break ;

                    lastFilterCtx = deinterlaceCtx;
                }
                // ... intermediate filters omitted
                        
                // create the output (sink) filter
                err = avfilter_graph_create_filter(&pBufferDstCtx, buffersinkFilter, "out", NULL, pix_fmts, pFilterGraph);
                if(err < 0)
                    break ;
         
                // link to the previous filter
                err = avfilter_link(lastFilterCtx, 0, pBufferDstCtx, 0);
                if(err < 0)
                    break ;
                    
                // configure the graph
                err = avfilter_graph_config(pFilterGraph, NULL);

                
                // wrap the input frame in an AVFrame
                avpicture_fill((AVPicture *)pSrcFrame, (uint8_t *)inMediaSample->data(),
                    (PixelFormat)mConfig.pixelFormat, mConfig.width, mConfig.height);
                pSrcFrame->width  = mConfig.width;
                pSrcFrame->height = mConfig.height;
                pSrcFrame->format = mConfig.pixelFormat;
                pSrcFrame->pts = inMediaSample->dts();

                // push the frame into the input (source) buffer filter
                ret = av_vsrc_buffer_add_frame(pBufferSrcCtx, pSrcFrame, AV_VSRC_BUF_FLAG_OVERWRITE);
                
                
                // ask the sink whether output frames are available; returns the number that can be pulled
                int count = avfilter_poll_frame(pBufferDstCtx->inputs[0]);
                if(count > 0)
                {
                    AVFilterBufferRef *picRef = NULL;
                    // pull the result from the sink filter
                    int ret = av_vsink_buffer_get_video_buffer_ref(pBufferDstCtx, &picRef, 0);
                    if(picRef != NULL)
                    {
                        // convert the AVFilterBufferRef to an AVFrame
                        avfilter_fill_frame_from_video_buffer_ref(pSinkFrame, picRef);
                        pSinkFrame->format = picRef->format;
                        pSinkFrame->width  = picRef->video->w;
                        pSinkFrame->height = picRef->video->h;
                        
                        const int numBytes = avpicture_get_size((PixelFormat)pSinkFrame->format, pSinkFrame->width, pSinkFrame->height);
                        // convert the time base
                        AVRational tb  = { 1, 1000000 };
                        const int64 dts = av_rescale_q(picRef->pts, pBufferDstCtx->inputs[0]->time_base, tb);
                        // copy out the image data
                        avpicture_fill((AVPicture *)pDstFrame, (uint8_t *)mediaSample->data(),
                            (PixelFormat)pSinkFrame->format, pSinkFrame->width, pSinkFrame->height);

                        av_picture_copy((AVPicture *)pDstFrame, (AVPicture *)pSinkFrame,
                            (PixelFormat)pSinkFrame->format, pSinkFrame->width, pSinkFrame->height);                        
                        
                        // release the buffer reference
                        avfilter_unref_buffer(picRef);
                    }
                }
            }                                                                                         
                                                                                                

         For more articles, please visit my personal website:
    http://www.codelive.cn/
