zoukankan      html  css  js  c++  java
  • FFMPEG:H264解码-SDL显示(RGB32、RGB24、YUV420P、YUV422) 分类: DirectX ffmpeg-SDL-VLC-Live555 VC++ 2014-11-25 17:45 726人阅读 评论(0) 收藏


    FFMpeg对视频文件进行解码的大致流程

    1. 注册所有容器格式: av_register_all()
    2. 打开文件: av_open_input_file()
    3. 从文件中提取流信息: av_find_stream_info()
    4. 穷举所有的流,查找其中种类为CODEC_TYPE_VIDEO
    5. 查找对应的解码器: avcodec_find_decoder()
    6. 打开编解码器: avcodec_open()
    7. 为解码帧分配内存: avcodec_alloc_frame()
    8. 不停地从码流中提取出帧数据: av_read_frame()
    9. 判断帧的类型,对于视频帧调用: avcodec_decode_video()
    10. 解码完后,释放解码器: avcodec_close()
    11. 关闭输入文件:av_close_input_file()

    // Required link libraries: avcodec.lib avdevice.lib avfilter.lib avformat.lib avutil.lib swscale.lib SDL.lib
    extern "C"
    {
    #include <libavcodec/avcodec.h>
    #include <libavformat/avformat.h>
    #include <libswscale/swscale.h>


    #include <libsdl/SDL.h>
    #include <libsdl/SDL_thread.h>
    };


    void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame) 
    {
    // Dump one frame as a binary PPM (P6) file named "frame<N>.ppm".
    // Assumes pFrame->data[0] holds packed 24-bit RGB rows — TODO confirm:
    // the RGB32 caller path passes 4-byte pixels, which this will misread.
    FILE *pFile;
    char szFilename[32];
    int  y;

    // Open file
    sprintf(szFilename, "frame%d.ppm", iFrame);
    pFile=fopen(szFilename, "wb");
    if(pFile==NULL)
    return;

    // Write header. P6 requires whitespace-separated fields; the
    // original single-space, no-newline header was rejected by most viewers.
    fprintf(pFile, "P6\n%d %d\n255\n", width, height);

    // Write pixel data row by row: linesize may exceed width*3 because
    // of alignment padding, so a single bulk fwrite would be wrong.
    for(y=0; y<height; y++)
    fwrite(pFrame->data[0]+y*pFrame->linesize[0], 1, width*3, pFile);

    // Close file
    fclose(pFile);
    }


    void CTest0Dlg::OnButton1() 
    {
    // TODO: Add your control notification handler code here
     AVFormatContext *pFormatCtx;
      int             i, videoStream;
      AVCodecContext  *pCodecCtx;
      AVCodec         *pCodec;
      AVFrame         *pFrame; 
      AVFrame         *pFrameRGB;
      AVPacket        packet;
      int             frameFinished;
      int             numBytes;
      uint8_t         *buffer;
      static int sws_flags = SWS_BICUBIC;
      struct SwsContext *img_convert_ctx;
       AVPicture pict;  
    //  argc = 2;
      char argv[100] = "d:\temp\VIDEO720576.264";
     // argv[1] = "d:\temp\ff.mpg";
     
      // /*注册所有可用的格式和编解码器*/
      av_register_all();
      
      // Open video file /*以输入方式打开一个媒体文件,也即源文件,codecs并没有打开,只读取了文件的头信息*/
      if(av_open_input_file(&pFormatCtx, argv, NULL, 0, NULL)!=0)
        return ; // Couldn't open file
      
      // Retrieve stream information
    /*通过读取媒体文件的中的包来获取媒体文件中的流信息,对于没有头信息的文件如(mpeg)是非常有用的,
    // 该函数通常重算类似mpeg-2帧模式的真实帧率,该函数并未改变逻辑文件的position.
    */
      if(av_find_stream_info(pFormatCtx)<0)
        return ; // Couldn't find stream information
      
      // Dump information about file onto standard error
    //该函数的作用就是检查下初始化过程中设置的参数是否符合规范
      dump_format(pFormatCtx, 0, argv, 0);
      
      // Find the first video stream
      videoStream=-1;
      printf("%d ",pFormatCtx->nb_streams);
      getchar();
      for(i=0; i<pFormatCtx->nb_streams; i++)
        if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO&&videoStream < 0)
     {
          videoStream=i;
          break;
        }
      if(videoStream==-1)
        return ; // Didn't find a video stream
      
      // Get a pointer to the codec context for the video stream
      pCodecCtx=pFormatCtx->streams[videoStream]->codec;
      
      // Find the decoder for the video stream
      pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
    /*通过code ID查找一个已经注册的音视频编码器,查找编码器之前,必须先调用av_register_all注册所有支持的编码器
    音视频编码器保存在一个链表中,查找过程中,函数从头到尾遍历链表,通过比较编码器的ID来查找
    */
      if(pCodec==NULL) {
        fprintf(stderr, "Unsupported codec! ");
        return ; // Codec not found
      }
      // Open codec
    //使用给定的AVCodec初始化AVCodecContext


      if(avcodec_open(pCodecCtx, pCodec)<0)
        return ; // Could not open codec


      //printf("name %s ",pCodec->name);
      //getchar();


      // Allocate video frame
      pFrame=avcodec_alloc_frame();
      
      // Allocate an AVFrame structure
      pFrameRGB=avcodec_alloc_frame();
      if(pFrameRGB==NULL)
        return ;
      
      // Determine required buffer size and allocate buffer
      numBytes=avpicture_get_size(PIX_FMT_RGB32, pCodecCtx->width,
             pCodecCtx->height);
      buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
      
      // Assign appropriate parts of buffer to image planes in pFrameRGB
      // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
      // of AVPicture
      avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB32,
       pCodecCtx->width, pCodecCtx->height);
      
      // Read frames and save first five frames to disk
      i=0;
      ////////////////////////////////////////////////////////////////
      if (SDL_Init(SDL_INIT_VIDEO) < 0)
      {
     fprintf(stderr, "can not initialize SDL:%s ", SDL_GetError());
     exit(1);
       }
      atexit(SDL_Quit);
      SDL_Surface *screen;
      screen = SDL_SetVideoMode(720, 576, 32, SDL_SWSURFACE|SDL_ANYFORMAT);
      if ( screen == NULL ) 
      {
     exit(2);
      }
      SDL_Surface *image;


        Uint32 rmask, gmask, bmask, amask;


        /* SDL interprets each pixel as a 32-bit number, so our masks must depend
           on the endianness (byte order) of the machine */
    #if 0//SDL_BYTEORDER == SDL_BIG_ENDIAN
        rmask = 0xff000000;
        gmask = 0x00ff0000;
        bmask = 0x0000ff00;
        amask = 0x000000ff;
    #else
        rmask = 0x000000ff;
        gmask = 0x0000ff00;
        bmask = 0x00ff0000;
        amask = 0xff000000;
    #endif


    image = SDL_CreateRGBSurface(SDL_SWSURFACE, 720, 576, 0,
    rmask, gmask, bmask, NULL);
        if(image == NULL) 
    {
            //fprintf(stderr, "CreateRGBSurface failed: %s ", SDL_GetError());
            exit(1);
        }
      //////////////////////////////////////////////////////////////////
      while(av_read_frame(pFormatCtx, &packet)>=0) 
      {
        // Is this a packet from the video stream?
        if(packet.stream_index==videoStream) 
     {
        // Decode video frame
        avcodec_decode_video(pCodecCtx, pFrame, &frameFinished, 
           packet.data, packet.size);
          
        // Did we get a video frame?
        if(frameFinished) 
        {
      // Convert the image from its native format to RGB




            img_convert_ctx = sws_getContext( pCodecCtx->width, 
                   pCodecCtx->height,
                   pCodecCtx->pix_fmt,
                   pCodecCtx->width, 
                   pCodecCtx->height,
                   PIX_FMT_RGB32,
                   sws_flags, NULL, NULL, NULL);
            sws_scale(img_convert_ctx,pFrame->data,pFrame->linesize,0,pCodecCtx->height,pFrameRGB->data,pFrameRGB->linesize);  
            sws_freeContext(img_convert_ctx);
         ////////////////////////////////////////////////////////////////
    memcpy(screen->pixels,buffer,720*576*4);

    SDL_UpdateRect(screen, 0, 0, image->w, image->h);

    /* Free the allocated BMP surface */
        SDL_FreeSurface(image);
    /////////////////////////////////////////////////////////////////
         // Save the frame to disk
         if((++i<=5))
           SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height, i);
        }
    }
        
        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
      }
      
      // Free the RGB image
      av_free(buffer);
      av_free(pFrameRGB);
      
      // Free the YUV frame
      av_free(pFrame);
      
      // Close the codec
      avcodec_close(pCodecCtx);
      
      // Close the video file
      av_close_input_file(pFormatCtx);
      MessageBox("over");
    }


    void CTest0Dlg::OnButton2() 
    {
    // TODO: Add your control notification handler code here
     AVFormatContext *pFormatCtx;
      int             i, videoStream;
      AVCodecContext  *pCodecCtx;
      AVCodec         *pCodec;
      AVFrame         *pFrame; 
      AVFrame         *pFrameRGB;
      AVPacket        packet;
      int             frameFinished;
      int             numBytes;
      uint8_t         *buffer;
      static int sws_flags = SWS_BICUBIC;
      struct SwsContext *img_convert_ctx;
       AVPicture pict;  
    //  argc = 2;
      char argv[100] = "d:\temp\VIDEO720576.264";
     // argv[1] = "d:\temp\ff.mpg";
     
      // /*注册所有可用的格式和编解码器*/
      av_register_all();
      
      // Open video file /*以输入方式打开一个媒体文件,也即源文件,codecs并没有打开,只读取了文件的头信息*/
      if(av_open_input_file(&pFormatCtx, argv, NULL, 0, NULL)!=0)
        return ; // Couldn't open file
      
      // Retrieve stream information
    /*通过读取媒体文件的中的包来获取媒体文件中的流信息,对于没有头信息的文件如(mpeg)是非常有用的,
    // 该函数通常重算类似mpeg-2帧模式的真实帧率,该函数并未改变逻辑文件的position.
    */
      if(av_find_stream_info(pFormatCtx)<0)
        return ; // Couldn't find stream information
      
      // Dump information about file onto standard error
    //该函数的作用就是检查下初始化过程中设置的参数是否符合规范
      dump_format(pFormatCtx, 0, argv, 0);
      
      // Find the first video stream
      videoStream=-1;
      printf("%d ",pFormatCtx->nb_streams);
      getchar();
      for(i=0; i<pFormatCtx->nb_streams; i++)
        if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO&&videoStream < 0)
     {
          videoStream=i;
          break;
        }
      if(videoStream==-1)
        return ; // Didn't find a video stream
      
      // Get a pointer to the codec context for the video stream
      pCodecCtx=pFormatCtx->streams[videoStream]->codec;
      
      // Find the decoder for the video stream
      pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
    /*通过code ID查找一个已经注册的音视频编码器,查找编码器之前,必须先调用av_register_all注册所有支持的编码器
    音视频编码器保存在一个链表中,查找过程中,函数从头到尾遍历链表,通过比较编码器的ID来查找
    */
      if(pCodec==NULL) {
        fprintf(stderr, "Unsupported codec! ");
        return ; // Codec not found
      }
      // Open codec
    //使用给定的AVCodec初始化AVCodecContext


      if(avcodec_open(pCodecCtx, pCodec)<0)
        return ; // Could not open codec


      //printf("name %s ",pCodec->name);
      //getchar();


      // Allocate video frame
      pFrame=avcodec_alloc_frame();
      
      // Allocate an AVFrame structure
      pFrameRGB=avcodec_alloc_frame();
      if(pFrameRGB==NULL)
        return ;
      
      // Determine required buffer size and allocate buffer
      numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
             pCodecCtx->height);
      buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
      
      // Assign appropriate parts of buffer to image planes in pFrameRGB
      // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
      // of AVPicture
      avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
       pCodecCtx->width, pCodecCtx->height);
      
      // Read frames and save first five frames to disk
      i=0;
      ////////////////////////////////////////////////////////////////
      if (SDL_Init(SDL_INIT_VIDEO) < 0)
      {
     fprintf(stderr, "can not initialize SDL:%s ", SDL_GetError());
     exit(1);
       }
      atexit(SDL_Quit);
      SDL_Surface *screen;
      screen = SDL_SetVideoMode(720, 576, 24, SDL_SWSURFACE|SDL_ANYFORMAT);
      if ( screen == NULL ) 
      {
     exit(2);
      }
      SDL_Surface *image;


        Uint32 rmask, gmask, bmask, amask;


        /* SDL interprets each pixel as a 32-bit number, so our masks must depend
           on the endianness (byte order) of the machine */
    #if 0//SDL_BYTEORDER == SDL_BIG_ENDIAN
        rmask = 0xff000000;
        gmask = 0x00ff0000;
        bmask = 0x0000ff00;
        amask = 0x000000ff;
    #else
        rmask = 0x000000ff;
        gmask = 0x0000ff00;
        bmask = 0x00ff0000;
        amask = 0xff000000;
    #endif


    image = SDL_CreateRGBSurface(SDL_SWSURFACE, 720, 576, 0,
    rmask, gmask, bmask, NULL);
        if(image == NULL) 
    {
            //fprintf(stderr, "CreateRGBSurface failed: %s ", SDL_GetError());
            exit(1);
        }
      //////////////////////////////////////////////////////////////////
      while(av_read_frame(pFormatCtx, &packet)>=0) 
      {
        // Is this a packet from the video stream?
        if(packet.stream_index==videoStream) 
     {
        // Decode video frame
        avcodec_decode_video(pCodecCtx, pFrame, &frameFinished, 
           packet.data, packet.size);
          
        // Did we get a video frame?
        if(frameFinished) 
        {
      // Convert the image from its native format to RGB




            img_convert_ctx = sws_getContext( pCodecCtx->width, 
                   pCodecCtx->height,
                   pCodecCtx->pix_fmt,
                   pCodecCtx->width, 
                   pCodecCtx->height,
                   PIX_FMT_BGR24,
                   sws_flags, NULL, NULL, NULL);
            sws_scale(img_convert_ctx,pFrame->data,pFrame->linesize,0,pCodecCtx->height,pFrameRGB->data,pFrameRGB->linesize);  
            sws_freeContext(img_convert_ctx);
         ////////////////////////////////////////////////////////////////
    memcpy(screen->pixels,buffer,720*576*3);

    SDL_UpdateRect(screen, 0, 0, image->w, image->h);

    /* Free the allocated BMP surface */
        SDL_FreeSurface(image);
    /////////////////////////////////////////////////////////////////
         // Save the frame to disk
         if((++i<=5))
           SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height, i);
        }
    }
        
        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
      }
      
      // Free the RGB image
      av_free(buffer);
      av_free(pFrameRGB);
      
      // Free the YUV frame
      av_free(pFrame);
      
      // Close the codec
      avcodec_close(pCodecCtx);
      
      // Close the video file
      av_close_input_file(pFormatCtx);
      MessageBox("over");
    }


    void CTest0Dlg::OnButton3() 
    {
    // TODO: Add your control notification handler code here
    // TODO: Add your control notification handler code here
     AVFormatContext *pFormatCtx;
      int             i, videoStream;
      AVCodecContext  *pCodecCtx;
      AVCodec         *pCodec;
      AVFrame         *pFrame; 
      AVFrame         *pFrameYUV;
      AVPacket        packet;
      int             frameFinished;
      int             numBytes;
      uint8_t         *buffer;
       SDL_Rect        rect;
      static int sws_flags = SWS_BICUBIC;
      struct SwsContext *img_convert_ctx;
       AVPicture pict;  
    //  argc = 2;
      char argv[100] = "d:\temp\VIDEO720576.264";
     // argv[1] = "d:\temp\ff.mpg";
     
      // /*注册所有可用的格式和编解码器*/
      av_register_all();
      
      // Open video file /*以输入方式打开一个媒体文件,也即源文件,codecs并没有打开,只读取了文件的头信息*/
      if(av_open_input_file(&pFormatCtx, argv, NULL, 0, NULL)!=0)
        return ; // Couldn't open file
      
      // Retrieve stream information
    /*通过读取媒体文件的中的包来获取媒体文件中的流信息,对于没有头信息的文件如(mpeg)是非常有用的,
    // 该函数通常重算类似mpeg-2帧模式的真实帧率,该函数并未改变逻辑文件的position.
    */
      if(av_find_stream_info(pFormatCtx)<0)
        return ; // Couldn't find stream information
      
      // Dump information about file onto standard error
    //该函数的作用就是检查下初始化过程中设置的参数是否符合规范
      dump_format(pFormatCtx, 0, argv, 0);
      
      // Find the first video stream
      videoStream=-1;
      printf("%d ",pFormatCtx->nb_streams);
      getchar();
      for(i=0; i<pFormatCtx->nb_streams; i++)
        if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO&&videoStream < 0)
     {
          videoStream=i;
          break;
        }
      if(videoStream==-1)
        return ; // Didn't find a video stream
      
      // Get a pointer to the codec context for the video stream
      pCodecCtx=pFormatCtx->streams[videoStream]->codec;
      
      // Find the decoder for the video stream
      pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
    /*通过code ID查找一个已经注册的音视频编码器,查找编码器之前,必须先调用av_register_all注册所有支持的编码器
    音视频编码器保存在一个链表中,查找过程中,函数从头到尾遍历链表,通过比较编码器的ID来查找
    */
      if(pCodec==NULL) {
        fprintf(stderr, "Unsupported codec! ");
        return ; // Codec not found
      }
      // Open codec
    //使用给定的AVCodec初始化AVCodecContext


      if(avcodec_open(pCodecCtx, pCodec)<0)
        return ; // Could not open codec


      //printf("name %s ",pCodec->name);
      //getchar();


      // Allocate video frame
      pFrame=avcodec_alloc_frame();
      
      // Allocate an AVFrame structure
      pFrameYUV=avcodec_alloc_frame();
      if(pFrameYUV==NULL)
        return ;
      
      // Determine required buffer size and allocate buffer
      numBytes=avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width,
             pCodecCtx->height);
      buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
      
      // Assign appropriate parts of buffer to image planes in pFrameRGB
      // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
      // of AVPicture
      avpicture_fill((AVPicture *)pFrameYUV, buffer, PIX_FMT_YUV420P,
       pCodecCtx->width, pCodecCtx->height);
      
      // Read frames and save first five frames to disk
      i=0;
      ////////////////////////////////////////////////////////////////
      if (SDL_Init(SDL_INIT_VIDEO) < 0)
      {
     fprintf(stderr, "can not initialize SDL:%s ", SDL_GetError());
     exit(1);
       }
      atexit(SDL_Quit);
      SDL_Surface *screen;
      screen = SDL_SetVideoMode(720, 576, 24, SDL_SWSURFACE|SDL_ANYFORMAT);
      if ( screen == NULL ) 
      {
     exit(2);
      }
      SDL_Overlay *image;


    image = SDL_CreateYUVOverlay(pCodecCtx->width,
    pCodecCtx->height,
    SDL_YV12_OVERLAY,
    screen);


        if(image == NULL) 
    {
            //fprintf(stderr, "CreateRGBSurface failed: %s ", SDL_GetError());
            exit(1);
        }
      //////////////////////////////////////////////////////////////////
      while(av_read_frame(pFormatCtx, &packet)>=0) 
      {
        // Is this a packet from the video stream?
        if(packet.stream_index==videoStream) 
     {
        // Decode video frame
        avcodec_decode_video(pCodecCtx, pFrame, &frameFinished, 
           packet.data, packet.size);
          
        // Did we get a video frame?
        if(frameFinished) 
        {
      // Convert the image from its native format to RGB
              SDL_LockYUVOverlay(image);


    // pFrameYUV->data[0] = image->pixels[0];//预先改变指针数据区,不用copy
    // pFrameYUV->data[1] = image->pixels[2];
    // pFrameYUV->data[2] = image->pixels[1];
    //
    // pFrameYUV->linesize[0] = image->pitches[0];
    // pFrameYUV->linesize[1] = image->pitches[2];
    // pFrameYUV->linesize[2] = image->pitches[1];


            img_convert_ctx = sws_getContext( pCodecCtx->width, 
                   pCodecCtx->height,
                   pCodecCtx->pix_fmt,
                   pCodecCtx->width, 
                   pCodecCtx->height,
                   PIX_FMT_YUV420P,
                   sws_flags, NULL, NULL, NULL);
            sws_scale(img_convert_ctx,pFrame->data,pFrame->linesize,0,pCodecCtx->height,pFrameYUV->data,pFrameYUV->linesize);  
            sws_freeContext(img_convert_ctx);


           memcpy(image->pixels[0], pFrameYUV->data[0],720*576);//拷贝数据yuv420,也可预先改变指针
      memcpy(image->pixels[2], pFrameYUV->data[1],720*576/4);
      memcpy(image->pixels[1], pFrameYUV->data[2],720*576/4);


    SDL_UnlockYUVOverlay(image);
     
    rect.x = 0;
    rect.y = 0;
    rect.w = pCodecCtx->width;
    rect.h = pCodecCtx->height;
    SDL_DisplayYUVOverlay(image, &rect);
        }
    }
        
        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
      }
      
      // Free the RGB image
      av_free(buffer);
      av_free(pFrameYUV);
      
      // Free the YUV frame
      av_free(pFrame);
      
      // Close the codec
      avcodec_close(pCodecCtx);
      
      // Close the video file
      av_close_input_file(pFormatCtx);
      MessageBox("over");
    }


    void CTest0Dlg::OnButton4() 
    {
    // TODO: Add your control notification handler code here
    // TODO: Add your control notification handler code here
     AVFormatContext *pFormatCtx;
      int             i, videoStream;
      AVCodecContext  *pCodecCtx;
      AVCodec         *pCodec;
      AVFrame         *pFrame; 
      AVFrame         *pFrameYUV;
      AVPacket        packet;
      int             frameFinished;
      int             numBytes;
      uint8_t         *buffer;
       SDL_Rect        rect;
      static int sws_flags = SWS_BICUBIC;
      struct SwsContext *img_convert_ctx;
       AVPicture pict;  
    //  argc = 2;
      char argv[100] = "d:\temp\VIDEO720576.264";
     // argv[1] = "d:\temp\ff.mpg";
     
      // /*注册所有可用的格式和编解码器*/
      av_register_all();
      
      // Open video file /*以输入方式打开一个媒体文件,也即源文件,codecs并没有打开,只读取了文件的头信息*/
      if(av_open_input_file(&pFormatCtx, argv, NULL, 0, NULL)!=0)
        return ; // Couldn't open file
      
      // Retrieve stream information
    /*通过读取媒体文件的中的包来获取媒体文件中的流信息,对于没有头信息的文件如(mpeg)是非常有用的,
    // 该函数通常重算类似mpeg-2帧模式的真实帧率,该函数并未改变逻辑文件的position.
    */
      if(av_find_stream_info(pFormatCtx)<0)
        return ; // Couldn't find stream information
      
      // Dump information about file onto standard error
    //该函数的作用就是检查下初始化过程中设置的参数是否符合规范
      dump_format(pFormatCtx, 0, argv, 0);
      
      // Find the first video stream
      videoStream=-1;
      printf("%d ",pFormatCtx->nb_streams);
      getchar();
      for(i=0; i<pFormatCtx->nb_streams; i++)
        if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO&&videoStream < 0)
     {
          videoStream=i;
          break;
        }
      if(videoStream==-1)
        return ; // Didn't find a video stream
      
      // Get a pointer to the codec context for the video stream
      pCodecCtx=pFormatCtx->streams[videoStream]->codec;
      
      // Find the decoder for the video stream
      pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
    /*通过code ID查找一个已经注册的音视频编码器,查找编码器之前,必须先调用av_register_all注册所有支持的编码器
    音视频编码器保存在一个链表中,查找过程中,函数从头到尾遍历链表,通过比较编码器的ID来查找
    */
      if(pCodec==NULL) {
        fprintf(stderr, "Unsupported codec! ");
        return ; // Codec not found
      }
      // Open codec
    //使用给定的AVCodec初始化AVCodecContext


      if(avcodec_open(pCodecCtx, pCodec)<0)
        return ; // Could not open codec


      //printf("name %s ",pCodec->name);
      //getchar();


      // Allocate video frame
      pFrame=avcodec_alloc_frame();
      
      // Allocate an AVFrame structure
      pFrameYUV=avcodec_alloc_frame();
      if(pFrameYUV==NULL)
        return ;
      
      // Determine required buffer size and allocate buffer
      numBytes=avpicture_get_size(PIX_FMT_YUV422, pCodecCtx->width,
             pCodecCtx->height);
      buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
      
      // Assign appropriate parts of buffer to image planes in pFrameRGB
      // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
      // of AVPicture
      avpicture_fill((AVPicture *)pFrameYUV, buffer, PIX_FMT_YUV422,
       pCodecCtx->width, pCodecCtx->height);
      
      // Read frames and save first five frames to disk
      i=0;
      ////////////////////////////////////////////////////////////////
      if (SDL_Init(SDL_INIT_VIDEO) < 0)
      {
     fprintf(stderr, "can not initialize SDL:%s ", SDL_GetError());
     exit(1);
       }
      atexit(SDL_Quit);
      SDL_Surface *screen;
      screen = SDL_SetVideoMode(720, 576, 24, SDL_SWSURFACE|SDL_ANYFORMAT);
      if ( screen == NULL ) 
      {
     exit(2);
      }
      SDL_Overlay *image;


    image = SDL_CreateYUVOverlay(pCodecCtx->width,
    pCodecCtx->height,
    SDL_YUY2_OVERLAY,
    screen);


        if(image == NULL) 
    {
            //fprintf(stderr, "CreateRGBSurface failed: %s ", SDL_GetError());
            exit(1);
        }
      //////////////////////////////////////////////////////////////////
      while(av_read_frame(pFormatCtx, &packet)>=0) 
      {
        // Is this a packet from the video stream?
        if(packet.stream_index==videoStream) 
     {
        // Decode video frame
        avcodec_decode_video(pCodecCtx, pFrame, &frameFinished, 
           packet.data, packet.size);
          
        // Did we get a video frame?
        if(frameFinished) 
        {
      // Convert the image from its native format to RGB
              SDL_LockYUVOverlay(image);


    // pFrameYUV->data[0] = image->pixels[0];
    // pFrameYUV->data[1] = image->pixels[2];
    // pFrameYUV->data[2] = image->pixels[1];
    //
    // pFrameYUV->linesize[0] = image->pitches[0];
    // pFrameYUV->linesize[1] = image->pitches[2];
    // pFrameYUV->linesize[2] = image->pitches[1];


            img_convert_ctx = sws_getContext( pCodecCtx->width, 
                   pCodecCtx->height,
                   pCodecCtx->pix_fmt,
                   pCodecCtx->width, 
                   pCodecCtx->height,
                   PIX_FMT_YUV422,
                   sws_flags, NULL, NULL, NULL);
            sws_scale(img_convert_ctx,pFrame->data,pFrame->linesize,0,pCodecCtx->height,pFrameYUV->data,pFrameYUV->linesize);  
            sws_freeContext(img_convert_ctx);


           memcpy(image->pixels[0], pFrameYUV->data[0],720*576*2);//拷贝数据yuv422




    SDL_UnlockYUVOverlay(image);
     
    rect.x = 0;
    rect.y = 0;
    rect.w = pCodecCtx->width;
    rect.h = pCodecCtx->height;
    SDL_DisplayYUVOverlay(image, &rect);
        }
    }
        
        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
      }
      
      // Free the RGB image
      av_free(buffer);
      av_free(pFrameYUV);
      
      // Free the YUV frame
      av_free(pFrame);
      
      // Close the codec
      avcodec_close(pCodecCtx);
      
      // Close the video file
      av_close_input_file(pFormatCtx);
      MessageBox("over");
    }

    http://download.csdn.net/detail/mao0514/8202691


    版权声明:本文为博主原创文章,未经博主允许不得转载。

  • 相关阅读:
    请输出in.txt文件中的2 4 6 8 9 10 12行
    shell 求总分
    快速排序小结
    串的模式匹配和KMP算法
    重定向和转发的区别
    servlet中文乱码问题
    JAXP简介
    DOM常用方法总结
    初探javascript
    现在网站主流排版方式
  • 原文地址:https://www.cnblogs.com/mao0504/p/4706478.html
Copyright © 2011-2022 走看看