zoukankan      html  css  js  c++  java
  • Qt FFMPEG+OpenCV开启摄像头

    //ffmpegDecode.h
    
    #ifndef __FFMPEG_DECODE_H__
    #define __FFMPEG_DECODE_H__
    
    #include "global.h"
    
    extern "C"
    {
    #include "libavcodec/avcodec.h"
    #include "libavformat/avformat.h"
    //图像转换结构需要引入的头文件
    #include "libswscale/swscale.h"
    };
     
    // Minimal FFmpeg-based video decoder that pulls frames from a file /
    // RTSP URL and converts them to BGR24 cv::Mat images for OpenCV use.
    // Typical usage: construct, check getInitResult(), then call
    // getDecodedFrame() in a loop.
    class ffmpegDecode
    {
    public:
        // `file`: input path or URL; NULL selects a built-in default RTSP URL.
        ffmpegDecode(char * file = NULL);
            ~ffmpegDecode();
    
            // Reads + decodes the next packet; returns the converted BGR frame
            // (empty Mat on failure).
            cv::Mat getDecodedFrame();
            // Re-decodes the packet currently held and returns the converted frame.
            cv::Mat getLastFrame();
            // Reads the next packet only; returns av_read_frame's result
            // (0 on success, negative AVERROR on EOF/error).
            int readOneFrame();
            // Number of frames successfully decoded so far.
            int getFrameIndex(){return m_framIndex;};
    
        private:
            AVFrame *pAvFrame;              // holder for decoded (raw) pictures
            AVFormatContext *pFormatCtx;    // demuxer / container context
            AVCodecContext  *pCodecCtx;     // decoder context of the video stream
            AVCodec         *pCodec;        // decoder implementation
    
            int i;                          // scratch loop index
            int videoindex;                 // index of the first video stream
            int m_framIndex;                // decoded-frame counter
            char *filepath;                 // input path/URL (not owned — see ctor)
            int ret, got_picture;           // last decode result / got-frame flag
            SwsContext *img_convert_ctx;    // cached YUV->BGR24 scaler
            int y_size;                     // width * height of the video stream
            AVPacket *packet;               // packet reused by av_read_frame
    
            cv::Mat *pCvMat;                // destination BGR frame (heap-owned)
    
            bool m_initResult;              // true once init+openDecode+prepare succeed
    
            bool init();        // open input, probe streams
            bool openDecode();  // find video stream, open its decoder
            void prepare();     // allocate frame/packet, dump stream info
            // Converts pFrame (YUV) into *pCvMat (BGR24) via img_convert_ctx.
            void get(AVCodecContext *pCodecCtx, SwsContext *img_convert_ctx,AVFrame *pFrame);
    
    public:
            // Reports whether the whole pipeline initialized successfully.
            bool getInitResult();
    
    };
    
    #endif
    //ffmpegDecode.cpp
    
    #include "ffmpegDecode.h"
    #include <QDebug>
    
    int time_out = 0;
    int firsttimeplay = 1;
    // FFmpeg I/O interrupt callback. Counts invocations; after more than 40
    // polls during the very first connection attempt it returns non-zero once,
    // which makes FFmpeg abort the blocking operation (treated as a timeout).
    // On all later attempts it always allows the operation to continue.
    int interrupt_cb(void *ctx)
    {
        ++time_out;
        if (time_out <= 40)
            return 0;           // still within the polling budget
        time_out = 0;           // budget exhausted: restart the counter
        if (!firsttimeplay)
            return 0;           // only the first attempt is allowed to time out
        firsttimeplay = 0;
        return -1;              // non-zero => abort the blocking FFmpeg call
    }
    
    // Tears down the decoding pipeline and releases every resource the
    // object acquired. Null guards make destruction safe even when
    // construction failed part-way through.
    ffmpegDecode :: ~ffmpegDecode()
    {
        // Release and delete the cached frame. (The original called release()
        // twice and leaked the heap-allocated cv::Mat itself.)
        if (pCvMat)
        {
            pCvMat->release();
            delete pCvMat;          // allocated with `new` in the constructor
            pCvMat = NULL;
        }
        // Free the packet payload AND the AVPacket struct (av_malloc'd in
        // prepare()); the original freed only the payload.
        if (packet)
        {
            av_free_packet(packet);
            av_free(packet);
            packet = NULL;
        }
        // Free the decoded-frame holder (was leaked).
        if (pAvFrame)
        {
            av_frame_free(&pAvFrame);
        }
        // Free the cached scaler context (was leaked).
        if (img_convert_ctx)
        {
            sws_freeContext(img_convert_ctx);
            img_convert_ctx = NULL;
        }
        if (pCodecCtx)
        {
            avcodec_close(pCodecCtx);
        }
        if (pFormatCtx)
        {
            avformat_close_input(&pFormatCtx);
        }
        // NOTE(review): filepath may point at a string literal or a copy the
        // constructor made; it is deliberately not freed here (tiny, one-off).
    }
    
    // Initializes all members, records the input path/URL and attempts to
    // bring the whole decode pipeline up. Check getInitResult() afterwards.
    // `file`: input path or URL; NULL selects the built-in default RTSP URL.
    ffmpegDecode :: ffmpegDecode(char * file)
    {
        firsttimeplay = 1;
        pAvFrame    = NULL;
        pFormatCtx  = NULL;
        pCodecCtx   = NULL;
        pCodec      = NULL;
    
        pCvMat = new cv::Mat();
        i = 0;
        videoindex = 0;
        m_framIndex = 0;
        ret = 0;
        got_picture = 0;
        img_convert_ctx = NULL;
        y_size = 0;
        packet = NULL;
    
        if (NULL == file)
        {
            // Cast is required: ISO C++ forbids binding a string literal to a
            // non-const char* (the original relied on a deprecated conversion).
            filepath = (char *)"rtsp://admin:admin123@192.168.10.239:554";
        }
        else
        {
            // fix: copy the caller's string instead of storing the raw pointer.
            // Callers pass pointers into temporaries (e.g.
            // QString::toStdString().c_str()), which dangle after the call.
            // The copy is intentionally never freed (tiny, lives as long as
            // the decoder is meaningful).
            size_t n = 0;
            while (file[n] != '\0') ++n;
            char *copy = new char[n + 1];
            for (size_t k = 0; k <= n; ++k) copy[k] = file[k];
            filepath = copy;
        }
    
        m_initResult = false;
        if (init() && openDecode())
        {
            prepare();
            m_initResult = true;
        }
    }
    
    // Reports whether the constructor managed to open the input, find and
    // open a video decoder, and allocate the working buffers.
    bool ffmpegDecode::getInitResult()
    {
        const bool pipelineReady = m_initResult;
        return pipelineReady;
    }
    
    bool ffmpegDecode :: init()
    {
        printf("init start...
    ");
        //ffmpeg注册复用器,编码器等的函数av_register_all()。
        //该函数在所有基于ffmpeg的应用程序中几乎都是第一个被调用的。只有调用了该函数,才能使用复用器,编码器等。
        //这里注册了所有的文件格式和编解码器的库,所以它们将被自动的使用在被打开的合适格式的文件上。注意你只需要调用 av_register_all()一次,因此我们在主函数main()中来调用它。如果你喜欢,也可以只注册特定的格式和编解码器,但是通常你没有必要这样做。
        av_register_all();
         avformat_network_init();
    //    pFormatCtx = avformat_alloc_context();
    //    pFormatCtx->interrupt_callback.callback = interrupt_cb;//--------注册回调函数
    
    //    pFormatCtx->interrupt_callback.opaque = pFormatCtx;
        //打开视频文件,通过参数filepath来获得文件名。这个函数读取文件的头部并且把信息保存到我们给的AVFormatContext结构体中。
        //最后2个参数用来指定特殊的文件格式,缓冲大小和格式参数,但如果把它们设置为空NULL或者0,libavformat将自动检测这些参数。
        
        AVDictionary* options = NULL;  
            av_dict_set(&options, "rtsp_transport", "tcp", 0);  
            av_dict_set(&options, "stimeout", "2000000", 0); //设置超时断开连接时间,单位微秒
    
        if(avformat_open_input(&pFormatCtx,filepath,NULL,&options)!=0)
        {
            printf("无法打开文件
    ");
            return false;
        }
    
        //查找文件的流信息,avformat_open_input函数只是检测了文件的头部,接着要检查在文件中的流的信息
        if(avformat_find_stream_info(pFormatCtx,&options)<0)
        {
            printf("Couldn't find stream information.
    ");
            return false;
        }
        printf("init finished...
    ");
    
        return true;
    }
    
    // Locates the first video stream, finds a matching decoder and opens it.
    // Sets `videoindex` and `pCodecCtx`; returns true on success.
    bool ffmpegDecode :: openDecode()
    {
        printf("openDecode start...\n");
        videoindex = -1;
        // fix: nb_streams is unsigned; use an unsigned loop index to avoid a
        // signed/unsigned comparison.
        for (unsigned int s = 0; s < pFormatCtx->nb_streams; s++)
        {
            if (pFormatCtx->streams[s]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
            {
                videoindex = (int)s;
                break;
            }
        }
        if (videoindex == -1)
        {
            printf("Didn't find a video stream.\n");
            return false;
        }
        pCodecCtx = pFormatCtx->streams[videoindex]->codec;
    
        // Find a decoder implementation for this stream's codec.
        pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
        if (pCodec == NULL)
        {
            printf("Codec not found.\n");
            return false;
        }
    
        // Open the decoder.
        if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
        {
            printf("Could not open codec.\n");
            return false;
        }
        printf("openDecode finished\n");
    
        return true;
    }
    
    // Allocates the working frame and packet and dumps stream information.
    // Must run after openDecode() (needs pCodecCtx dimensions).
    void ffmpegDecode :: prepare()
    {
        // Frame that receives decoded (raw YUV) pictures.
        pAvFrame = av_frame_alloc();
        y_size = pCodecCtx->width * pCodecCtx->height;
        // Packet reused by av_read_frame(); pre-sized to one luma plane.
        packet = (AVPacket *)av_malloc(sizeof(AVPacket));
        av_new_packet(packet, y_size);
    
        // Debug aid: prints streams, codecs, frame rate, resolution, etc.
        printf("文件信息-----------------------------------------\n");
        av_dump_format(pFormatCtx, 0, filepath, 0);
    }
    
    // Drops the previously converted frame and reads the next packet from
    // the input into `packet`.
    // Returns 0 on success, a negative AVERROR code on EOF or error.
    int ffmpegDecode :: readOneFrame()
    {
        pCvMat->release();
        const int readResult = av_read_frame(pFormatCtx, packet);
        return readResult;
    }
    
    // Reads the next packet, decodes it if it belongs to the video stream,
    // converts the picture to BGR24 and returns it as a cv::Mat.
    // Returns an empty Mat on read failure, decode error, or when the packet
    // produced no picture yet (decoder priming).
    cv::Mat ffmpegDecode :: getDecodedFrame()
    {
        // fix: the original ignored the read result and decoded a stale
        // packet on EOF/error.
        if (readOneFrame() < 0)
        {
            return cv::Mat();
        }
        if (packet->stream_index == videoindex)
        {
            // Decode one video packet.
            ret = avcodec_decode_video2(pCodecCtx, pAvFrame, &got_picture, packet);
            if (ret < 0)
            {
                printf("解码错误\n");
                av_free_packet(packet); // fix: packet data was leaked on this path
                return cv::Mat();
            }
            if (got_picture)
            {
                m_framIndex++;
                // Lazily create (and cache) the YUV -> BGR24 scaler.
                if (img_convert_ctx == NULL)
                {
                    img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
                        pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
                        AV_PIX_FMT_BGR24, SWS_BICUBIC, NULL, NULL, NULL);
                }
                // Ensure the destination Mat exists with the right geometry.
                if (pCvMat->empty())
                {
                    pCvMat->create(cv::Size(pCodecCtx->width, pCodecCtx->height), CV_8UC3);
                }
    
                if (img_convert_ctx != NULL)
                {
                    get(pCodecCtx, img_convert_ctx, pAvFrame);
                }
            }
        }
        av_free_packet(packet);
        return *pCvMat;
    }
    // Re-decodes the packet currently held in `packet` and returns the
    // converted BGR frame (or the previous contents of *pCvMat if no picture
    // was produced).
    cv::Mat ffmpegDecode :: getLastFrame()
    {
        ret = avcodec_decode_video2(pCodecCtx, pAvFrame, &got_picture, packet);
        if (got_picture)
        {
            // fix: the original created a brand-new SwsContext on EVERY call
            // and never freed it, leaking one scaler context per frame.
            // Reuse the cached member context instead.
            if (img_convert_ctx == NULL)
            {
                img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
                    pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
                    AV_PIX_FMT_BGR24, SWS_BICUBIC, NULL, NULL, NULL);
            }
    
            if (img_convert_ctx != NULL)
            {
                get(pCodecCtx, img_convert_ctx, pAvFrame);
            }
        }
        return *pCvMat;
    }
    
    // Converts the decoded picture `pFrame` (decoder pixel format) to BGR24
    // directly into *pCvMat using the supplied scaler context.
    void ffmpegDecode :: get(AVCodecContext * pCodecCtx, SwsContext * img_convert_ctx, AVFrame * pFrame)
    {
        // Ensure the destination Mat matches the decoder's frame geometry.
        if (pCvMat->empty())
        {
            pCvMat->create(cv::Size(pCodecCtx->width, pCodecCtx->height), CV_8UC3);
        }
    
        // fix: the original allocated a temporary AVFrame plus a heap buffer
        // on every call, scaled into it and memcpy'd into the Mat. Scaling
        // straight into the Mat's pixel buffer produces identical output and
        // removes the per-frame allocation, copy, and the (leak-prone)
        // new/delete pair.
        uint8_t *dstData[4] = { pCvMat->data, NULL, NULL, NULL };
        int dstLinesize[4] = { (int)pCvMat->step, 0, 0, 0 };
        sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize,
                  0, pCodecCtx->height, dstData, dstLinesize);
    }
    //crtspdecodethread.h
    
    #ifndef CRTSPDECODETHREAD_H
    #define CRTSPDECODETHREAD_H
    
    #include <QThread>
    #include <QMutex>
    #include "ffmpegDecode.h"
    
    
    // Worker thread that pulls frames from either a USB camera (OpenCV
    // VideoCapture) or an RTSP stream (ffmpegDecode), emits every frame via
    // SendVideoFrame, and queues every Nth frame into the global detection
    // queue. Configure with SetCameraParam() before start().
    class CRTSPDecodeThread : public QThread
    {
        Q_OBJECT
    
    public:
        CRTSPDecodeThread(QObject* parent);
        ~CRTSPDecodeThread();
    
        // url ("USB" selects the local webcam), detect interval (queue every
        // Nth frame), camera id tagged onto queued frames.
        void SetCameraParam(QString, int, int cameraID);
        // Thread body: capture loop with 15 s offline/reconnect handling.
        void run();
    
    signals:
        void SendVideoFrame(cv::Mat);   // every captured frame
        void SendDetectFrame(cv::Mat);  // NOTE(review): never emitted in this file
    
    private:
        ffmpegDecode* m_pVdoDecode;     // RTSP decoder (unused in USB mode)
        bool m_isExist;                 // loop exit flag set by the destructor
        unsigned long m_FrameCount;     // frames captured since run() started
        int m_detectInterval;           // queue every Nth frame for detection
    
        VideoCapture m_VideoCap;        // USB capture device
    
        QString m_cameraURL;            // "USB" or an RTSP URL
        QMutex m_Mutex;                 // guards pushes into g_OrgImgQueue
    
        int m_cameraID;                 // id stamped on queued frames
        bool m_decodeInitResult;        // whether the RTSP decoder came up
    };
    
    #endif // CRTSPDECODETHREAD_H
    //crtspdecodethread.cpp
    
    #include "crtspdecodethread.h"
    #include "ffmpegDecode.h"
    #include <QDebug>
    #include <QDateTime>
    #include <queue>
    #include <QMutexLocker>
    
    extern bool g_ImportLib;
     std::queue<ST_IMGINFO>         g_OrgImgQueue;
    
    
    // Constructs the thread in an idle, fully-initialized state.
    CRTSPDecodeThread::CRTSPDecodeThread(QObject* parent):QThread(parent)
    {
        m_isExist = false;
        // fix: these members were left uninitialized; run() (and any cleanup
        // code) reads them, which is undefined behavior if SetCameraParam()
        // was never called — and m_pVdoDecode stays unset even then when the
        // USB branch is taken.
        m_pVdoDecode = NULL;
        m_decodeInitResult = false;
        m_FrameCount = 0;
        m_detectInterval = 1;
        m_cameraID = -1;
    }
    
    // Stops the capture loop and joins the thread.
    CRTSPDecodeThread::~CRTSPDecodeThread()
    {
        // fix: the original set m_isExist only AFTER wait() returned, when it
        // could no longer influence the loop. Signal both exit conditions
        // BEFORE waiting.
        m_isExist = true;
        requestInterruption();
        quit();
        wait();
        // NOTE(review): m_pVdoDecode is leaked here. Deleting it would be the
        // right thing, but only once the constructor guarantees it is
        // initialized to NULL — confirm before adding `delete m_pVdoDecode;`.
    }
    
    // Configures the capture source before the thread is started.
    // strURL: "USB" selects the local webcam (device 0); anything else is
    //         treated as an RTSP/stream URL for the FFmpeg decoder.
    // iInterval: queue every Nth frame for detection.
    // cameraID: id stamped onto queued frames.
    void CRTSPDecodeThread::SetCameraParam(QString strURL, int iInterval, int cameraID)
    {
        m_cameraID = cameraID;
        m_detectInterval = iInterval;
        m_cameraURL = strURL;
        if (m_cameraURL == "USB")
        {
            if (!m_VideoCap.open(0))
            {
                qDebug() << "打开USB摄像头失败...";
            }
        }
        else
        {
            // fix: the original passed strURL.toStdString().c_str() — a
            // pointer into a temporary std::string that is destroyed at the
            // end of the full expression, while ffmpegDecode stores the raw
            // pointer (dangling). qstrdup makes a heap copy that outlives the
            // decoder; it is intentionally never freed (tiny, one per camera).
            m_pVdoDecode = new ffmpegDecode(qstrdup(strURL.toUtf8().constData()));
            m_decodeInitResult = m_pVdoDecode->getInitResult();
        }
    }
    
    
    void CRTSPDecodeThread::run()
    {
    
        m_FrameCount = 0;
    
        cv::Mat img;
        unsigned long iRTSPOfflineTick = GetTickCount();
        while(!isInterruptionRequested())
        {
            if(m_isExist)
            {
                break;
            }
    
            if(m_cameraURL == "USB")
            {
                m_VideoCap>>img;
            }
            else
            {
                if(m_decodeInitResult)
                {
                    img =m_pVdoDecode->getDecodedFrame();
                }
            }
    
    
            if(!img.empty())
            {
                m_FrameCount++;
                //cvtColor(img, img, COLOR_BGR2RGB);
                iRTSPOfflineTick = GetTickCount();
    
                emit SendVideoFrame(img);
    
                if(m_FrameCount % m_detectInterval == 0)
                {
    
                    ST_IMGINFO imgInfo;
                     img.copyTo(imgInfo.img);
    
                     imgInfo.camera_id =m_cameraID;// m_pManager->GetCameraID();
    
                     QDateTime dtTime;
                     imgInfo.time = dtTime.currentDateTime().toString("yyyy-MM-dd HH:mm:ss");
    
                     QMutexLocker lock(&m_Mutex);
                     g_OrgImgQueue.push(imgInfo);
    
                }
    
                img.release();
            }
            else
            {
                qDebug()<<"获取原始视频帧失败...";
    
                if( (GetTickCount() -iRTSPOfflineTick ) > 1000*15)
                {
                    qDebug()<<"重新打开视频流...";
    
                    iRTSPOfflineTick = GetTickCount();
    
                    if(m_cameraURL == "USB")
                    {
                        bool bRet = m_VideoCap.open(0);
                        if(!bRet)
                        {
                            qDebug()<<"打开USB摄像头失败...";
                        }
                    }
                    else
                    {
                        delete m_pVdoDecode;
                        m_pVdoDecode = NULL;
    
                         m_pVdoDecode = new ffmpegDecode((char*)m_cameraURL.toStdString().c_str());
                         m_decodeInitResult = m_pVdoDecode->getInitResult();
                    }
                }
            }
        }
    }
  • 相关阅读:
    第一册:lesson thirty five。
    第一册:lesson thirty three。
    第一册:lesson thirty one。
    C#比较两个对象是否为同一个对象。
    第一册:lesson twentynine..
    第一册:lesson twenty seven。
    C#函数返回值。
    说明
    推荐一些python Beautiful Soup学习网址
    祝各位节日快乐!20151111
  • 原文地址:https://www.cnblogs.com/zhehan54/p/9242440.html
Copyright © 2011-2022 走看看