  • Audio/Video Primer 8 - a small ffmpeg experiment: v4l2 capture on Ubuntu, format conversion, encoding to H.264, viewing with ffplay

    1. getpic_transform_encode2h264.c

    #include <stdio.h>
    #include <string.h>
    #include <stdlib.h>
    #include <unistd.h>
    #include <time.h>
    #include <sys/time.h>
    #include "avformat.h"
    #include "avcodec.h"
    #include "avdevice.h"
    #include <libavutil/imgutils.h>  
    #include <libswscale/swscale.h> 
     
    
    /*  Note: in newer FFmpeg the (muxing-layer) AVStream `codec` field has been replaced
        by `codecpar`; this listing still uses the old API. Background article:
        https://blog.csdn.net/weixin_34419326/article/details/91775446?utm_medium=distribute.pc_relevant.none-task-blog-BlogCommendFromMachineLearnPai2-1.control&depth_1-utm_source=distribute.pc_relevant.none-task-blog-BlogCommendFromMachineLearnPai2-1.control
        (a short codecpar sketch follows this listing)
    */
    
    #define    LOOP_NUM            300
    #define OUT_WIDTH            320
    #define OUT_HEIGHT            240
     
    char* input_name= "video4linux2";
    char* file_name = "/dev/video0";
     
    struct timeval  time_val;
    float      time_start;
    float      time_end;
     
    float get_diff_time(struct timeval* start , int update)
    {
        float dt;
        struct timeval now;
        gettimeofday(&now, NULL);
        dt = (float)(now.tv_sec  - start->tv_sec);
        dt += (float)(now.tv_usec - start->tv_usec) * 1e-6;
        
        if (update == 1) {
            start->tv_sec = now.tv_sec;
            start->tv_usec = now.tv_usec;
        }
        
        return dt;
    }
     
    int flush_encoder(AVFormatContext *fmt_ctx,unsigned int stream_index){
        int ret;
        int got_frame;
        AVPacket enc_pkt;
        if (!(fmt_ctx->streams[stream_index]->codec->codec->capabilities & AV_CODEC_CAP_DELAY))
            return 0;
     
        while (1) {
            enc_pkt.data = NULL;
            enc_pkt.size = 0;
            av_init_packet(&enc_pkt);
            ret = avcodec_encode_video2 (fmt_ctx->streams[stream_index]->codec, &enc_pkt,
                NULL, &got_frame);
            if (ret < 0)
                break;
            if (!got_frame){
                ret=0;
                break;
            }
            printf("Flush Encoder: Succeed to encode 1 frame!	size:%5d
    ",enc_pkt.size);
            ret = av_write_frame(fmt_ctx, &enc_pkt);
            if (ret < 0)
                break;
        }
        return ret;
    }
     
     
    int captureFrame(void){
        AVFormatContext *fmtCtx = NULL;    
        AVInputFormat     *inputFmt;
        AVPacket         *packet;
        AVCodecContext    *pCodecCtx;
        AVCodec         *pCodec; 
        struct SwsContext *sws_ctx;  
        FILE *fp; 
        int i;
        int ret;
        int videoindex;
        
        enum AVPixelFormat dst_pix_fmt = AV_PIX_FMT_YUV420P;
        const char *dst_size = NULL;  
        const char *src_size = NULL;  
        uint8_t *src_data[4];   
        uint8_t *dst_data[4];  
        int src_linesize[4];  
        int dst_linesize[4];  
        int src_bufsize;  
        int dst_bufsize;  
        int src_w ;  
        int src_h ;  
        int dst_w = OUT_WIDTH;   
        int dst_h = OUT_HEIGHT;
        
        inputFmt = av_find_input_format (input_name);    
       
        if (inputFmt == NULL) {
            printf("can not find input format\n");
            return -1;
        }

        if (avformat_open_input(&fmtCtx, file_name, inputFmt, NULL) < 0) {
            printf("can not open input file\n");
            return -1;
        }
        
        av_dump_format(fmtCtx, 0, file_name, 0);
     
        videoindex= -1;
        for(i=0; i<fmtCtx->nb_streams; i++) 
            if(fmtCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){
                videoindex=i;
                break;
            }
        if (videoindex == -1) {
            printf("Didn't find a video stream.\n");
            return -1;
        }
     
        pCodecCtx = fmtCtx->streams[videoindex]->codec;
        pCodec    = avcodec_find_decoder(pCodecCtx->codec_id); 
     
        printf("picture width   =  %d 
    ", pCodecCtx->width);
        printf("picture height  =  %d 
    ", pCodecCtx->height);
        printf("Pixel   Format  =  %d 
    ", pCodecCtx->pix_fmt);
            
        sws_ctx = sws_getContext( pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, dst_w, dst_h, dst_pix_fmt,  
                                 SWS_BILINEAR, NULL, NULL, NULL); 
     
        src_bufsize = av_image_alloc(src_data, src_linesize, pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, 16);
        dst_bufsize = av_image_alloc(dst_data, dst_linesize, dst_w, dst_h, dst_pix_fmt, 1);
        
        packet = (AVPacket *)av_malloc(sizeof(AVPacket));  
     
        /* set out format */
        AVFormatContext        *outFormatCtx;
        AVOutputFormat        *outfmt;
        AVCodecContext        *outCodecCtx;  
        AVStream            *video_st;
        AVDictionary         *param = 0;
        AVCodec                *outCodec;
        AVFrame                *outFrame;
        AVPacket             outpkt;
     
        uint8_t *picture_buf; 
        char    *out_file  = "ds.h264";
        int     picture_size;
        int     y_size;
        int        got_picture;
        int     loop = 0;
     
        outFormatCtx = avformat_alloc_context();
        outfmt = av_guess_format(NULL, out_file, NULL);  
        outFormatCtx->oformat = outfmt; 
        if (avio_open(&outFormatCtx->pb, out_file, AVIO_FLAG_READ_WRITE) < 0) {
            printf("Failed to open output file!\n");
            return -1;
        }
        video_st = avformat_new_stream(outFormatCtx, 0); 
        if (video_st == NULL) {
            printf("create new stream err\n");
            return -1;
        }
        
        outCodecCtx = video_st->codec;
        outCodecCtx->codec_id = outfmt->video_codec;
        outCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
        outCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
        outCodecCtx->width = dst_w;    
        outCodecCtx->height = dst_h;  
        outCodecCtx->bit_rate = 2000000;    
        outCodecCtx->gop_size=10;  
      
        outCodecCtx->time_base.num = 1;    
        outCodecCtx->time_base.den = 25;   
        outCodecCtx->qmin = 10;  
        outCodecCtx->qmax = 51;  
        outCodecCtx->max_b_frames=3; 
        
        if(outCodecCtx->codec_id == AV_CODEC_ID_H264) {   /* check the encoder's codec id, not the v4l2 input's */
            av_dict_set(&param,"preset", "faster", 0);
            //av_dict_set(&param,"preset", "slow", 0);
            av_dict_set(&param,"tune", "zerolatency", 0);
        }
     
        av_dump_format(outFormatCtx, 0, out_file, 1);
        
        outCodec = avcodec_find_encoder(outCodecCtx->codec_id);
        if (!outCodec) {
            printf("Can not find encoder!\n");
            return -1;
        }
     
        if (avcodec_open2(outCodecCtx, outCodec, &param) < 0) {
            printf("Failed to open encoder!\n");
            return -1;
        }
     
        outFrame = av_frame_alloc();
        picture_size = avpicture_get_size(outCodecCtx->pix_fmt, outCodecCtx->width, outCodecCtx->height);
        picture_buf = (uint8_t *)av_malloc(picture_size); 
        avpicture_fill((AVPicture *)outFrame, picture_buf, outCodecCtx->pix_fmt, outCodecCtx->width, outCodecCtx->height);
        outFrame->format = outCodecCtx->pix_fmt;
        outFrame->width  = outCodecCtx->width;
        outFrame->height = outCodecCtx->height;    
        avformat_write_header(outFormatCtx,NULL);
        av_new_packet(&outpkt,picture_size);
        y_size = outCodecCtx->width * outCodecCtx->height; 
        
        time_start = get_diff_time(&time_val, 1);
        while(loop++ < LOOP_NUM){
            if (av_read_frame(fmtCtx, packet) < 0)
                break;
            memcpy(src_data[0], packet->data, packet->size);
            av_free_packet(packet);   /* release the raw v4l2 frame once it has been copied */
            sws_scale(sws_ctx, (const uint8_t * const *)src_data, src_linesize, 0, pCodecCtx->height, dst_data, dst_linesize);
            
     
            outFrame->data[0] = dst_data[0];   /* planes as returned by av_image_alloc */
            outFrame->data[1] = dst_data[1];
            outFrame->data[2] = dst_data[2];
            
            outFrame->pts=(loop -1)*(video_st->time_base.den)/((video_st->time_base.num)*25);
            ret = avcodec_encode_video2(outCodecCtx, &outpkt, outFrame, &got_picture);
            if (ret < 0) {
                printf("Failed to encode!\n");
                return -1;
            }
     
            if(got_picture==1){
                outpkt.stream_index = video_st->index;
                ret = av_write_frame(outFormatCtx, &outpkt);
                av_free_packet(&outpkt); 
            }
        }
     
        time_end = get_diff_time(&time_val, 0);
        printf("\nencoded %d frames, spent %f s\n\n", loop - 1, time_end);
        
        ret = flush_encoder(outFormatCtx,0);
        if (ret < 0) {
            printf("Flushing encoder failed\n");
            return -1;
        }
     
        av_write_trailer(outFormatCtx);  
        if (video_st){  
            avcodec_close(video_st->codec);  
            av_free(outFrame);  
            av_free(picture_buf);  
        }  
        avio_close(outFormatCtx->pb);  
        avformat_free_context(outFormatCtx); 
        
        av_free_packet(packet);
        av_free(packet);
        av_freep(&src_data[0]);
        av_freep(&dst_data[0]);
        sws_freeContext(sws_ctx);
        avformat_close_input(&fmtCtx);

        return 0;
    }
     
    int main(void){    
        av_register_all(); 
        avcodec_register_all();    
        avdevice_register_all();    
        captureFrame();    
        return 0;
    }
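
    On the knowledge point noted at the top of the listing: since roughly FFmpeg 3.1 the AVStream->codec field is deprecated and stream parameters live in AVStream->codecpar, from which a decoder context is built explicitly. A minimal sketch of that newer pattern (not what the listing above does; it keeps the old API throughout):

    /* codecpar-based setup, sketch only: error handling trimmed,
       assumes an FFmpeg new enough to provide codecpar and avcodec_parameters_to_context() */
    AVCodecParameters *par    = fmtCtx->streams[videoindex]->codecpar;
    const AVCodec     *dec    = avcodec_find_decoder(par->codec_id);
    AVCodecContext    *decCtx = avcodec_alloc_context3(dec);
    avcodec_parameters_to_context(decCtx, par);   /* fills width, height, pix_fmt, ... */
    avcodec_open2(decCtx, dec, NULL);
    /* ... use decCtx wherever the listing touches fmtCtx->streams[videoindex]->codec ... */
    avcodec_free_context(&decCtx);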
     

    2. makefile

    OUT_APP         = test
    INCLUDE_PATH = /usr/local/ffmpeg/include/
    INCLUDE = -I$(INCLUDE_PATH) -I$(INCLUDE_PATH)libavutil/ -I$(INCLUDE_PATH)libavdevice/ \
              -I$(INCLUDE_PATH)libavcodec/ -I$(INCLUDE_PATH)libswresample \
              -I$(INCLUDE_PATH)libavfilter/ -I$(INCLUDE_PATH)libavformat \
              -I$(INCLUDE_PATH)libswscale/
     
    LIB_PATH = /usr/local/ffmpeg/lib/
    FFMPEG_LIBS = -L$(LIB_PATH) -lavformat -lavutil -lavdevice -lavcodec -lswresample -lavfilter -lswscale
    SDL_LIBS    = 
    LIBS        = $(FFMPEG_LIBS)$(SDL_LIBS)
     
    COMPILE_OPTS = $(INCLUDE)
    C              = c
    OBJ          = o
    C_COMPILER   = cc
    C_FLAGS      = $(COMPILE_OPTS) $(CPPFLAGS) $(CFLAGS)
     
    LINK          = cc -o 
    LINK_OPTS    = -lz -lm  -lpthread
    LINK_OBJ     = getpic_transform_encode2h264.o 
     
    .$(C).$(OBJ):
        $(C_COMPILER) -c -g $(C_FLAGS) $<
     
     
    $(OUT_APP): $(LINK_OBJ)
        $(LINK)$@  $(LINK_OBJ)  $(LIBS) $(LINK_OPTS)
     
    clean:
            -rm -rf *.$(OBJ) $(OUT_APP) core *.core *~  picture
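
    Assuming FFmpeg is installed under /usr/local/ffmpeg as the makefile expects (adjust LD_LIBRARY_PATH if the shared libraries are not on the default search path), a build-and-run pass looks roughly like:

    make
    export LD_LIBRARY_PATH=/usr/local/ffmpeg/lib:$LD_LIBRARY_PATH
    ./test            # grabs LOOP_NUM (300) frames from /dev/video0 and writes ds.h264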

    In my test the playback looks like fast-forward, probably because encoding takes too long and the source resolution is high; I'll dig into it later.
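
    One more factor worth noting: ds.h264 is a raw elementary stream with no container timestamps, so ffplay falls back to an assumed frame rate (25 fps by default) regardless of how fast the camera actually delivered frames; if capture ran slower than that, playback will look sped up. A quick way to check, assuming the raw H.264 demuxer's framerate option:

    ffplay ds.h264                  # default: plays the raw stream at 25 fps
    ffplay -framerate 10 ds.h264    # force a lower rate closer to the real capture rate (value to tune)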


  • Original article: https://www.cnblogs.com/happybirthdaytoyou/p/14272875.html