  • FFmpeg on Ubuntu: setting up the build environment and developing with it

    The steps are as follows:

    1. Download

    The official site is always the best starting point: http://ffmpeg.org/download.html

    Or check out the source via SVN: svn checkout svn://svn.mplayerhq.hu/ffmpeg/trunk ffmpeg

    2. Build

      • Run ./configure
        Unfortunately, configure may stop with the error:
        yasm not found, use --disable-yasm for a crippled build

        Fix: sudo apt-get install yasm
        Then re-run ./configure and it completes.

      • make

      • make install
        If you lack the required permissions, prefix the command with sudo (i.e. sudo make install).

      • Compiling your own source against FFmpeg: pay close attention to the order in which the libraries are linked; a sample link command is sketched below.
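        A minimal sketch of a link command, assuming FFmpeg was installed to the default prefix and the program also uses SDL 1.2 (player.c is just a placeholder name):
        gcc player.c -o player -lavformat -lavcodec -lswscale -lavutil -lSDL -lz -lm
        Libraries that depend on others (libavformat depends on libavcodec, which in turn depends on libavutil) must appear on the command line before the libraries they depend on.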

      • Reference code:
      • #include <SDL/SDL.h>
        
        #include <libavcodec/avcodec.h>
        #include <libavformat/avformat.h>
        #include <stdio.h>
        #include <libswscale/swscale.h>
        
        int main(int argc, char *argv[]) {
            AVFormatContext *pFormatCtx;
            int i, videoStream;
            AVCodecContext *pCodecCtx;
            AVCodec *pCodec;
            AVFrame *pFrame;
            AVFrame *pFrameYUV;
            AVPacket packet;
            int frameFinished;
            int numBytes;
            
        // Register all formats and codecs
            av_register_all();
        // Open video file
            if (av_open_input_file(&pFormatCtx, "/home/user/workspace/panda/media/video/4f5a9c384d94eb21e5273ec263457535.mp4", NULL, 0, NULL )
                    != 0) {
                printf("===  cannot open file
        ===");
                return -1; // Couldn't open file
            }
        // Retrieve stream information
            if (av_find_stream_info(pFormatCtx) < 0)
                return -1; // Couldn't find stream information
        // Dump information about file onto standard error
        //    dump_format(pFormatCtx, 0, argv[1], false);
        // Find the first video stream
            videoStream = -1;
            for (i = 0; i < pFormatCtx->nb_streams; i++)
                if (pFormatCtx->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO) ////////
                {
                    videoStream = i;
                    break;
                }
            if (videoStream == -1)
                return -1; // Didn't find a video stream
        // Get a pointer to the codec context for the video stream
            pCodecCtx = pFormatCtx->streams[videoStream]->codec; //////////
        ///////// SDL initialization
            if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_TIMER) != 0)
                return -1; // Could not initialize SDL
            SDL_Surface *screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, SDL_HWSURFACE);
            SDL_Overlay *overlay = SDL_CreateYUVOverlay(pCodecCtx->width, pCodecCtx->height, SDL_YV12_OVERLAY, screen);
            static SDL_Rect rect;
            rect.x = 0;
            rect.y = 0;
            rect.w = pCodecCtx->width;
            rect.h = pCodecCtx->height;
        //////////
        // Find the decoder for the video stream
            pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
            if (pCodec == NULL )
                return -1; // Codec not found
        // Open codec
            if (avcodec_open(pCodecCtx, pCodec) < 0)
                return -1; // Could not open codec
                
        // Allocate video frame
            pFrame = avcodec_alloc_frame();
        // Allocate an AVFrame structure
            pFrameYUV = avcodec_alloc_frame();
            if (pFrameYUV == NULL )
                return -1;
            
            static struct SwsContext *img_convert_ctx;
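            // sws_getContext() takes the source width/height/pixel format followed by the
            // destination width/height/pixel format; here source and destination sizes match,
            // so the context only converts the decoded frame's pixel format to YUV420P for the overlay.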
            
            img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
        //                                                                PIX_FMT_RGB24,
                    PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL );
            
        // Set SDL events
            SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
            SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
            SDL_ShowCursor(SDL_DISABLE);
            
        // Read frames
            while ((av_read_frame(pFormatCtx, &packet) >= 0) && (SDL_PollEvent(NULL ) == 0)) {
        // Is this a packet from the video stream?
                if (packet.stream_index == videoStream) {
        // Decode video frame
                    avcodec_decode_video(pCodecCtx, pFrame, &frameFinished, packet.data, packet.size);
        // Did we get a video frame?
                    if (frameFinished) {
        // Convert the image from its native format to YUV, and display
                        
                        SDL_LockYUVOverlay(overlay);
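                        // SDL's YV12 overlay stores its planes in Y, V, U order, while FFmpeg's
                        // YUV420P frame uses Y, U, V, so the U and V pointers below are swapped.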
                        pFrameYUV->data[0] = overlay->pixels[0];
                        pFrameYUV->data[1] = overlay->pixels[2];
                        pFrameYUV->data[2] = overlay->pixels[1];
                        
                        pFrameYUV->linesize[0] = overlay->pitches[0];
                        pFrameYUV->linesize[1] = overlay->pitches[2];
                        pFrameYUV->linesize[2] = overlay->pitches[1];
                        
        //                img_convert((AVPicture *) pFrameYUV, PIX_FMT_YUV420P, (AVPicture *) pFrame, pCodecCtx->pix_fmt, pCodecCtx->width,
        //                        pCodecCtx->height);
                        
                        // other codes
                        // Convert the image from its native format to RGB
                        
                        sws_scale(img_convert_ctx, (const uint8_t* const *) pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameYUV->data,
                                pFrameYUV->linesize);
                        
                        SDL_UnlockYUVOverlay(overlay);
                        SDL_DisplayYUVOverlay(overlay, &rect);
        ///
                        SDL_Delay(30);
                    }
                }
        // Free the packet that was allocated by av_read_frame
                av_free_packet(&packet);
            }
        // Free the RGB image
            av_free(pFrameYUV);
        // Free the YUV frame
            av_free(pFrame);
        // Close the codec
            avcodec_close(pCodecCtx);
        // Close the video file
            av_close_input_file(pFormatCtx);
        //
            SDL_FreeYUVOverlay(overlay);
            return 0;
        }
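      • Note on API versions: the code above uses the FFmpeg API of that era. Newer FFmpeg releases renamed or replaced several of these calls (av_open_input_file -> avformat_open_input, av_find_stream_info -> avformat_find_stream_info, avcodec_open -> avcodec_open2, avcodec_alloc_frame -> av_frame_alloc, avcodec_decode_video -> avcodec_decode_video2, av_close_input_file -> avformat_close_input). A minimal sketch of the newer open/close sequence, assuming the same hard-coded file path:

        AVFormatContext *fmt_ctx = NULL;
        // Newer API: open the input and read the stream headers
        if (avformat_open_input(&fmt_ctx, "/home/user/workspace/panda/media/video/4f5a9c384d94eb21e5273ec263457535.mp4", NULL, NULL) != 0)
            return -1;
        if (avformat_find_stream_info(fmt_ctx, NULL) < 0)
            return -1;
        // ... find the stream, open the decoder, and decode as before ...
        avformat_close_input(&fmt_ctx);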
      • Scaled-up playback (upscaling the video to a fixed 1920x1080 output):
      • #include <SDL/SDL.h>
        
        #include <libavcodec/avcodec.h>
        #include <libavformat/avformat.h>
        #include <stdio.h>
        #include <libswscale/swscale.h>
        
        int avcodec_main(int argc, char *argv[]) {
        	AVFormatContext *pFormatCtx;
        	int i, videoStream;
        	AVCodecContext *pCodecCtx;
        	AVCodec *pCodec;
        	AVFrame *pFrame;
        	AVFrame *pFrameYUV;
        	AVPacket packet;
        	int frameFinished;
        	int numBytes;
        
        // Register all formats and codecs
        	av_register_all();
        // Open video file
        	if (av_open_input_file(&pFormatCtx, "/home/user/workspace/panda/media/video/4f5a9c384d94eb21e5273ec263457535.mp4", NULL, 0, NULL )
        			!= 0) {
        		printf("===  cannot open file
        ===");
        		return -1; // Couldn't open file
        	}
        // Retrieve stream information
        	if (av_find_stream_info(pFormatCtx) < 0)
        		return -1; // Couldn't find stream information
        // Dump information about file onto standard error
        //	dump_format(pFormatCtx, 0, argv[1], false);
        // Find the first video stream
        	videoStream = -1;
        	for (i = 0; i < pFormatCtx->nb_streams; i++)
        		if (pFormatCtx->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO) ////////
        		{
        			videoStream = i;
        			break;
        		}
        	if (videoStream == -1)
        		return -1; // Didn't find a video stream
        // Get a pointer to the codec context for the video stream
        	pCodecCtx = pFormatCtx->streams[videoStream]->codec; //////////
        
        ///////// SDL initialization
        	if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_TIMER) != 0)
        		return -1; // Could not initialize SDL
        	int w = 1920, h = 1080;
        
        	SDL_Surface *screen = SDL_SetVideoMode(w, h, 0, SDL_HWSURFACE);
        	SDL_Overlay *overlay = SDL_CreateYUVOverlay(w, h, SDL_YV12_OVERLAY, screen);
        	static SDL_Rect rect;
        	rect.x = 0;
        	rect.y = 0;
        	rect.w = w;
        	rect.h = h;
        //////////
        // Find the decoder for the video stream
        	pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
        	if (pCodec == NULL )
        		return -1; // Codec not found
        // Open codec
        	if (avcodec_open(pCodecCtx, pCodec) < 0)
        		return -1; // Could not open codec
        
        // Allocate video frame
        	pFrame = avcodec_alloc_frame();
        // Allocate an AVFrame structure
        	pFrameYUV = avcodec_alloc_frame();
        	if (pFrameYUV == NULL )
        		return -1;
        
        	static struct SwsContext *img_convert_ctx;
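        	// The destination size passed to sws_getContext below (w x h) is larger than the
        	// source frame, so sws_scale upscales each decoded frame to fill the 1920x1080 overlay.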
        
        	img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, w, h,
        //				                                                PIX_FMT_RGB24,
        			PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL );
        
        // Set SDL events
        	SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
        	SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
        	SDL_ShowCursor(SDL_DISABLE);
        
        // Read frames
        	while ((av_read_frame(pFormatCtx, &packet) >= 0) && (SDL_PollEvent(NULL ) == 0)) {
        // Is this a packet from the video stream?
        		if (packet.stream_index == videoStream) {
        // Decode video frame
        			avcodec_decode_video(pCodecCtx, pFrame, &frameFinished, packet.data, packet.size);
        // Did we get a video frame?
        			if (frameFinished) {
        // Convert the image from its native format to YUV, and display
        
        				SDL_LockYUVOverlay(overlay);
        				pFrameYUV->data[0] = overlay->pixels[0];
        				pFrameYUV->data[1] = overlay->pixels[2];
        				pFrameYUV->data[2] = overlay->pixels[1];
        
        				pFrameYUV->linesize[0] = overlay->pitches[0];
        				pFrameYUV->linesize[1] = overlay->pitches[2];
        				pFrameYUV->linesize[2] = overlay->pitches[1];
        
        //				img_convert((AVPicture *) pFrameYUV, PIX_FMT_YUV420P, (AVPicture *) pFrame, pCodecCtx->pix_fmt, pCodecCtx->width,
        //						pCodecCtx->height);
        
        				// other codes
        				// Convert the image from its native format to RGB
        
        				sws_scale(img_convert_ctx, (const uint8_t* const *) pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameYUV->data,
        						pFrameYUV->linesize);
        
        				SDL_UnlockYUVOverlay(overlay);
        				SDL_DisplayYUVOverlay(overlay, &rect);
        ///
        				SDL_Delay(30);
        			}
        		}
        // Free the packet that was allocated by av_read_frame
        		av_free_packet(&packet);
        	}
        // Free the RGB image
        	av_free(pFrameYUV);
        // Free the YUV frame
        	av_free(pFrame);
        // Close the codec
        	avcodec_close(pCodecCtx);
        // Close the video file
        	av_close_input_file(pFormatCtx);
        //
        	SDL_FreeYUVOverlay(overlay);
        	return 0;
        }
        

          

      • Reference: http://hi.baidu.com/xiaomeng008/archive/tag/ffmpeg

      • ffmpeg: http://blog.csdn.net/byxdaz/article/details/7316304

        A comprehensive guide to building and using FFmpeg: http://lvzun.iteye.com/blog/706121

      • Highly recommended: http://dranger.com/ffmpeg/ An ffmpeg and SDL Tutorial
      • http://www.libsdl.org/release/SDL-1.2.15/test/  Official SDL test programs; the overlay example includes RGB-to-YUV conversion.
  • Original article: https://www.cnblogs.com/bigben0123/p/3278046.html