  • FFMPEG -- An ffmpeg and SDL Tutorial: tutorial05

    Fixed the ffmpeg audio/video synchronization problem during playback, and added the ability to scale the video up to a larger display size.
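
    The scaling change is concentrated in queue_picture() in the listing below: every decoded frame is stretched with libswscale into a fixed 1920x1080 (W x H) SDL YUV overlay instead of being copied at its native size. The helper below is only a minimal sketch of that step, not the author's code as written: the function name scale_frame_into_overlay is made up, and it reuses one scaler context via sws_getCachedContext(), whereas the listing calls sws_getContext() for every frame and never frees the context. It assumes the same legacy APIs the tutorial targets (old FFmpeg names such as PIX_FMT_YUV420P, SDL 1.2 overlays).

/* A minimal sketch of the per-frame upscaling step; see queue_picture() in the
 * listing below for the code as actually used. */
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
#include <SDL/SDL.h>

#define W 1920
#define H 1080

/* Stretch one decoded frame into a W x H YV12 overlay. The SwsContext pointer
 * is cached between calls so a new scaler is not allocated per frame. */
static void scale_frame_into_overlay(AVCodecContext *codecCtx, AVFrame *pFrame,
                                     SDL_Overlay *bmp, struct SwsContext **ctx) {
  AVPicture pict;

  SDL_LockYUVOverlay(bmp);

  /* SDL's YV12 overlay stores its planes as Y, V, U -- hence the swapped 1/2. */
  pict.data[0] = bmp->pixels[0];
  pict.data[1] = bmp->pixels[2];
  pict.data[2] = bmp->pixels[1];
  pict.linesize[0] = bmp->pitches[0];
  pict.linesize[1] = bmp->pitches[2];
  pict.linesize[2] = bmp->pitches[1];

  /* Reuse (or lazily create) one scaler: source size/format -> W x H YUV420P. */
  *ctx = sws_getCachedContext(*ctx,
                              codecCtx->width, codecCtx->height, codecCtx->pix_fmt,
                              W, H, PIX_FMT_YUV420P,
                              SWS_BICUBIC, NULL, NULL, NULL);

  sws_scale(*ctx, (const uint8_t * const *)pFrame->data, pFrame->linesize,
            0, codecCtx->height, pict.data, pict.linesize);

  SDL_UnlockYUVOverlay(bmp);
}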

      1 // tutorial05.c
      2 // A pedagogical video player that really works!
      3 //
      4 // Code based on FFplay, Copyright (c) 2003 Fabrice Bellard,
      5 // and a tutorial by Martin Bohme (boehme@inb.uni-luebeckREMOVETHIS.de)
      6 // Tested on Gentoo, CVS version 5/01/07 compiled with GCC 4.1.1
      7 // Use
      8 //
      9 // gcc -o tutorial05 tutorial05.c -lavformat -lavcodec -lz -lm `sdl-config --cflags --libs`
     10 // to build (assuming libavformat and libavcodec are correctly installed,
     11 // and assuming you have sdl-config. Please refer to SDL docs for your installation.)
     12 //
     13 // Run using
     14 // tutorial05 myvideofile.mpg
     15 //
     16 // to play the video.
     17 
     18 #include <libavcodec/avcodec.h>
     19 #include <libavformat/avformat.h>
     20 
     21 #include <SDL/SDL.h>
     22 #include <SDL/SDL_thread.h>
     23 #include <libswscale/swscale.h>
     24 #define W 1920
     25 #define H 1080
     26 #ifdef __MINGW32__
     27 #undef main /* Prevents SDL from overriding main() */
     28 #endif
     29 
     30 #include <stdio.h>
     31 #include <math.h>
     32 
     33 #define SDL_AUDIO_BUFFER_SIZE 1024
     34 
     35 #define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
     36 #define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
     37 
     38 #define AV_SYNC_THRESHOLD 0.01
     39 #define AV_NOSYNC_THRESHOLD 10.0
     40 
     41 #define FF_ALLOC_EVENT   (SDL_USEREVENT)
     42 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
     43 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
     44 
     45 #define VIDEO_PICTURE_QUEUE_SIZE 1
     46 
     47 typedef struct PacketQueue {
     48   AVPacketList *first_pkt, *last_pkt;
     49   int nb_packets;
     50   int size;
     51   SDL_mutex *mutex;
     52   SDL_cond *cond;
     53 } PacketQueue;
     54 
     55 
     56 typedef struct VideoPicture {
     57   SDL_Overlay *bmp;
     58   int width, height; /* source height & width */
     59   int allocated;
     60   double pts;
     61 } VideoPicture;
     62 
     63 typedef struct VideoState {
     64 
     65   AVFormatContext *pFormatCtx;
     66   int             videoStream, audioStream;
     67 
     68   double          audio_clock;
     69   AVStream        *audio_st;
     70   PacketQueue     audioq;
     71   uint8_t         audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
     72   unsigned int    audio_buf_size;
     73   unsigned int    audio_buf_index;
     74   AVPacket        audio_pkt;
     75   uint8_t         *audio_pkt_data;
     76   int             audio_pkt_size;
     77   int             audio_hw_buf_size;
     78   double          frame_timer;
     79   double          frame_last_pts;
     80   double          frame_last_delay;
     81   double          video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
     82   AVStream        *video_st;
     83   PacketQueue     videoq;
     84 
     85   VideoPicture    pictq[VIDEO_PICTURE_QUEUE_SIZE];
     86   int             pictq_size, pictq_rindex, pictq_windex;
     87   SDL_mutex       *pictq_mutex;
     88   SDL_cond        *pictq_cond;
     89 
     90   SDL_Thread      *parse_tid;
     91   SDL_Thread      *video_tid;
     92 
     93   char            filename[1024];
     94   int             quit;
     95 } VideoState;
     96 
     97 SDL_Surface     *screen;
     98 
     99 /* Since we only have one decoding thread, the Big Struct
    100    can be global in case we need it. */
    101 VideoState *global_video_state;
    102 
    103 void packet_queue_init(PacketQueue *q) {
    104   memset(q, 0, sizeof(PacketQueue));
    105   q->mutex = SDL_CreateMutex();
    106   q->cond = SDL_CreateCond();
    107 }
    108 int packet_queue_put(PacketQueue *q, AVPacket *pkt) {
    109 
    110   AVPacketList *pkt1;
    111   if(av_dup_packet(pkt) < 0) {
    112     return -1;
    113   }
    114   pkt1 = av_malloc(sizeof(AVPacketList));
    115   if (!pkt1)
    116     return -1;
    117   pkt1->pkt = *pkt;
    118   pkt1->next = NULL;
    119 
    120   SDL_LockMutex(q->mutex);
    121 
    122   if (!q->last_pkt)
    123     q->first_pkt = pkt1;
    124   else
    125     q->last_pkt->next = pkt1;
    126   q->last_pkt = pkt1;
    127   q->nb_packets++;
    128   q->size += pkt1->pkt.size;
    129   SDL_CondSignal(q->cond);
    130 
    131   SDL_UnlockMutex(q->mutex);
    132   return 0;
    133 }
    134 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
    135 {
    136   AVPacketList *pkt1;
    137   int ret;
    138 
    139   SDL_LockMutex(q->mutex);
    140 
    141   for(;;) {
    142 
    143     if(global_video_state->quit) {
    144       ret = -1;
    145       break;
    146     }
    147 
    148     pkt1 = q->first_pkt;
    149     if (pkt1) {
    150       q->first_pkt = pkt1->next;
    151       if (!q->first_pkt)
    152     q->last_pkt = NULL;
    153       q->nb_packets--;
    154       q->size -= pkt1->pkt.size;
    155       *pkt = pkt1->pkt;
    156       av_free(pkt1);
    157       ret = 1;
    158       break;
    159     } else if (!block) {
    160       ret = 0;
    161       break;
    162     } else {
    163       SDL_CondWait(q->cond, q->mutex);
    164     }
    165   }
    166   SDL_UnlockMutex(q->mutex);
    167   return ret;
    168 }
    169 double get_audio_clock(VideoState *is) {
    170   double pts;
    171   int hw_buf_size, bytes_per_sec, n;
    172 
    173   pts = is->audio_clock; /* maintained in the audio thread */
    174   hw_buf_size = is->audio_buf_size - is->audio_buf_index;
    175   bytes_per_sec = 0;
    176   n = is->audio_st->codec->channels * 2;
    177   if(is->audio_st) {
    178     bytes_per_sec = is->audio_st->codec->sample_rate * n;
    179   }
    180   if(bytes_per_sec) {
    181     pts -= (double)hw_buf_size / bytes_per_sec;
    182   }
    183   return pts;
    184 }
    185 
    186 int audio_decode_frame(VideoState *is, uint8_t *audio_buf, int buf_size, double *pts_ptr) {
    187 
    188   int len1, data_size, n;
    189   AVPacket *pkt = &is->audio_pkt;
    190   double pts;
    191 
    192   for(;;) {
    193     while(is->audio_pkt_size > 0) {
    194       data_size = buf_size;
    195       len1 = avcodec_decode_audio2(is->audio_st->codec,
    196                   (int16_t *)audio_buf, &data_size,
    197                   is->audio_pkt_data, is->audio_pkt_size);
    198       if(len1 < 0) {
    199     /* if error, skip frame */
    200     is->audio_pkt_size = 0;
    201     break;
    202       }
    203       is->audio_pkt_data += len1;
    204       is->audio_pkt_size -= len1;
    205       if(data_size <= 0) {
    206     /* No data yet, get more frames */
    207     continue;
    208       }
    209       pts = is->audio_clock;
    210       *pts_ptr = pts;
    211       n = 2 * is->audio_st->codec->channels;
    212       is->audio_clock += (double)data_size /
    213     (double)(n * is->audio_st->codec->sample_rate);
    214 
    215       /* We have data, return it and come back for more later */
    216       return data_size;
    217     }
    218     if(pkt->data)
    219       av_free_packet(pkt);
    220 
    221     if(is->quit) {
    222       return -1;
    223     }
    224     /* next packet */
    225     if(packet_queue_get(&is->audioq, pkt, 1) < 0) {
    226       return -1;
    227     }
    228     is->audio_pkt_data = pkt->data;
    229     is->audio_pkt_size = pkt->size;
    230     /* if update, update the audio clock w/pts */
    231     if(pkt->pts != AV_NOPTS_VALUE) {
    232       is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
    233     }
    234 
    235   }
    236 }
    237 
    238 void audio_callback(void *userdata, Uint8 *stream, int len) {
    239 
    240   VideoState *is = (VideoState *)userdata;
    241   int len1, audio_size;
    242   double pts;
    243 
    244   while(len > 0) {
    245     if(is->audio_buf_index >= is->audio_buf_size) {
    246       /* We have already sent all our data; get more */
    247       audio_size = audio_decode_frame(is, is->audio_buf, sizeof(is->audio_buf), &pts);
    248       if(audio_size < 0) {
    249     /* If error, output silence */
    250     is->audio_buf_size = 1024;
    251     memset(is->audio_buf, 0, is->audio_buf_size);
    252       } else {
    253     is->audio_buf_size = audio_size;
    254       }
    255       is->audio_buf_index = 0;
    256     }
    257     len1 = is->audio_buf_size - is->audio_buf_index;
    258     if(len1 > len)
    259       len1 = len;
    260     memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
    261     len -= len1;
    262     stream += len1;
    263     is->audio_buf_index += len1;
    264   }
    265 }
    266 
    267 static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque) {
    268   SDL_Event event;
    269   event.type = FF_REFRESH_EVENT;
    270   event.user.data1 = opaque;
    271   SDL_PushEvent(&event);
    272   return 0; /* 0 means stop timer */
    273 }
    274 
    275 /* schedule a video refresh in 'delay' ms */
    276 static void schedule_refresh(VideoState *is, int delay) {
    277   SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
    278 }
    279 
    280 void video_display(VideoState *is) {
    281 
    282   SDL_Rect rect;
    283   VideoPicture *vp;
    284   AVPicture pict;
    285   float aspect_ratio;
    286   int w, h, x, y;
    287   int i;
    288 
    289   vp = &is->pictq[is->pictq_rindex];
    290   if(vp->bmp) {
    291     if(is->video_st->codec->sample_aspect_ratio.num == 0) {
    292       aspect_ratio = 0;
    293     } else {
    294       aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio) *
    295     is->video_st->codec->width / is->video_st->codec->height;
    296     }
    297     if(aspect_ratio <= 0.0) {
    298       aspect_ratio = (float)is->video_st->codec->width /
    299     (float)is->video_st->codec->height;
    300     }
    301     h = screen->h;
    302     w = ((int)rint(h * aspect_ratio)) & -3;
    303     if(w > screen->w) {
    304       w = screen->w;
    305       h = ((int)rint(w / aspect_ratio)) & -3;
    306     }
    307     x = (screen->w - w) / 2;
    308     y = (screen->h - h) / 2;
    309 //x=0;y=0;w=0;h=0;
    310     rect.x = x;
    311     rect.y = y;
    312     rect.w = w;
    313     rect.h = h;
    314     SDL_DisplayYUVOverlay(vp->bmp, &rect);
    315   }
    316 }
    317 
    318 void video_refresh_timer(void *userdata) {
    319 
    320   VideoState *is = (VideoState *)userdata;
    321   VideoPicture *vp;
    322   double actual_delay, delay, sync_threshold, ref_clock, diff;
    323 
    324   if(is->video_st) {
    325     if(is->pictq_size == 0) {
    326       schedule_refresh(is, 1);
    327     } else {
    328       vp = &is->pictq[is->pictq_rindex];
    329 
    330       delay = vp->pts - is->frame_last_pts; /* the pts from last time */
    331       if(delay <= 0 || delay >= 1.0) {
    332     /* if incorrect delay, use previous one */
    333     delay = is->frame_last_delay;
    334       }
    335       /* save for next time */
    336       is->frame_last_delay = delay;
    337       is->frame_last_pts = vp->pts;
    338 
    339       /* update delay to sync to audio */
    340       ref_clock = get_audio_clock(is);
    341       diff = vp->pts - ref_clock;
    342 
    343       /* Skip or repeat the frame. Take delay into account
    344      FFPlay still doesn't "know if this is the best guess." */
    345       sync_threshold = (delay > AV_SYNC_THRESHOLD) ? delay : AV_SYNC_THRESHOLD;
    346       if(fabs(diff) < AV_NOSYNC_THRESHOLD) {
    347     if(diff <= -sync_threshold) {
    348       delay = 0;
    349     } else if(diff >= sync_threshold) {
    350       delay = 2 * delay;
    351     }
    352       }
    353       is->frame_timer += delay;
    354       /* compute the REAL delay */
    355       actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
    356       if(actual_delay < 0.010) {
    357     /* Really it should skip the picture instead */
    358     actual_delay = 0.010;
    359       }
    360       schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));
    361       /* show the picture! */
    362       video_display(is);
    363 
    364       /* update queue for next picture! */
    365       if(++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE) {
    366     is->pictq_rindex = 0;
    367       }
    368       SDL_LockMutex(is->pictq_mutex);
    369       is->pictq_size--;
    370       SDL_CondSignal(is->pictq_cond);
    371       SDL_UnlockMutex(is->pictq_mutex);
    372     }
    373   } else {
    374     schedule_refresh(is, 100);
    375   }
    376 }
    377 
    378 void alloc_picture(void *userdata) {
    379 
    380   VideoState *is = (VideoState *)userdata;
    381   VideoPicture *vp;
    382 
    383   vp = &is->pictq[is->pictq_windex];
    384   if(vp->bmp) {
    385     // we already have one; make another, bigger/smaller
    386     SDL_FreeYUVOverlay(vp->bmp);
    387   }
    388   // Allocate a place to put our YUV image on that screen
    389   vp->bmp = SDL_CreateYUVOverlay(W,
    390                 H,
    391                  SDL_YV12_OVERLAY,
    392                  screen);
    393   vp->width = W;
    394   vp->height = H;
    395 
    396   SDL_LockMutex(is->pictq_mutex);
    397   vp->allocated = 1;
    398   SDL_CondSignal(is->pictq_cond);
    399   SDL_UnlockMutex(is->pictq_mutex);
    400 
    401 }
    402 
    403 int queue_picture(VideoState *is, AVFrame *pFrame, double pts) {
    404 
    405   VideoPicture *vp;
    406   int dst_pix_fmt;
    407   AVPicture pict;
    408 
    409   /* wait until we have space for a new pic */
    410   SDL_LockMutex(is->pictq_mutex);
    411   while(is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
    412     !is->quit) {
    413     SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    414   }
    415   SDL_UnlockMutex(is->pictq_mutex);
    416 
    417   if(is->quit)
    418     return -1;
    419 
    420   // windex is set to 0 initially
    421   vp = &is->pictq[is->pictq_windex];
    422 
    423   /* allocate or resize the buffer! */
    424   if(!vp->bmp ||
    425      vp->width != is->video_st->codec->width ||
    426      vp->height != is->video_st->codec->height) {
    427     SDL_Event event;
    428 
    429     vp->allocated = 0;
    430     /* we have to do it in the main thread */
    431     event.type = FF_ALLOC_EVENT;
    432     event.user.data1 = is;
    433     SDL_PushEvent(&event);
    434 
    435     /* wait until we have a picture allocated */
    436     SDL_LockMutex(is->pictq_mutex);
    437     while(!vp->allocated && !is->quit) {
    438       SDL_CondWait(is->pictq_cond, is->pictq_mutex);
    439     }
    440     SDL_UnlockMutex(is->pictq_mutex);
    441     if(is->quit) {
    442       return -1;
    443     }
    444   }
    445   /* We have a place to put our picture on the queue */
    446   /* If we are skipping a frame, do we set this to null
    447      but still return vp->allocated = 1? */
    448 
    449 
    450   if(vp->bmp) {
    451 
    452     SDL_LockYUVOverlay(vp->bmp);
    453 
    454     dst_pix_fmt = PIX_FMT_YUV420P;
    455     /* point pict at the queue */
    456 
    457     pict.data[0] = vp->bmp->pixels[0];
    458     pict.data[1] = vp->bmp->pixels[2];
    459     pict.data[2] = vp->bmp->pixels[1];
    460 
    461     pict.linesize[0] = vp->bmp->pitches[0];
    462     pict.linesize[1] = vp->bmp->pitches[2];
    463     pict.linesize[2] = vp->bmp->pitches[1];
    464 
    465     // Convert the image into YUV format that SDL uses
    466 //    img_convert(&pict, dst_pix_fmt,
    467 //        (AVPicture *)pFrame, is->video_st->codec->pix_fmt,
    468 //        is->video_st->codec->width, is->video_st->codec->height);
    469 
    470 #if 1//zhibin
    471 
    472     static struct SwsContext *img_convert_ctx;
    473 
    474         img_convert_ctx = sws_getContext(is->video_st->codec->width, is->video_st->codec->height, is->video_st->codec->pix_fmt, W,H,
    475 //                                                                PIX_FMT_RGB24,
    476                 PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL );
    477         sws_scale(img_convert_ctx, (const uint8_t* const *) pFrame->data, pFrame->linesize, 0, is->video_st->codec->height, pict.data,
    478                 pict.linesize);
    479 #endif
    480 
    481     SDL_UnlockYUVOverlay(vp->bmp);
    482     vp->pts = pts;
    483 
    484     /* now we inform our display thread that we have a pic ready */
    485     if(++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE) {
    486       is->pictq_windex = 0;
    487     }
    488     SDL_LockMutex(is->pictq_mutex);
    489     is->pictq_size++;
    490     SDL_UnlockMutex(is->pictq_mutex);
    491   }
    492   return 0;
    493 }
    494 
    495 double synchronize_video(VideoState *is, AVFrame *src_frame, double pts) {
    496 
    497   double frame_delay;
    498 
    499   if(pts != 0) {
    500     /* if we have pts, set video clock to it */
    501     is->video_clock = pts;
    502   } else {
    503     /* if we aren't given a pts, set it to the clock */
    504     pts = is->video_clock;
    505   }
    506   /* update the video clock */
    507   frame_delay = av_q2d(is->video_st->codec->time_base);
    508   /* if we are repeating a frame, adjust clock accordingly */
    509   frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
    510   is->video_clock += frame_delay;
    511   return pts;
    512 }
    513 uint64_t global_video_pkt_pts = AV_NOPTS_VALUE;
    514 
    515 /* These are called whenever we allocate a frame
    516  * buffer. We use this to store the global_pts in
    517  * a frame at the time it is allocated.
    518  */
    519 int our_get_buffer(struct AVCodecContext *c, AVFrame *pic) {
    520   int ret = avcodec_default_get_buffer(c, pic);
    521   uint64_t *pts = av_malloc(sizeof(uint64_t));
    522   *pts = global_video_pkt_pts;
    523   pic->opaque = pts;
    524   return ret;
    525 }
    526 void our_release_buffer(struct AVCodecContext *c, AVFrame *pic) {
    527   if(pic) av_freep(&pic->opaque);
    528   avcodec_default_release_buffer(c, pic);
    529 }
    530 
    531 int video_thread(void *arg) {
    532   VideoState *is = (VideoState *)arg;
    533   AVPacket pkt1, *packet = &pkt1;
    534   int len1, frameFinished;
    535   AVFrame *pFrame;
    536   double pts;
    537 
    538   pFrame = avcodec_alloc_frame();
    539 
    540   for(;;) {
    541     if(packet_queue_get(&is->videoq, packet, 1) < 0) {
    542       // means we quit getting packets
    543       break;
    544     }
    545     pts = 0;
    546 
    547     // Save global pts to be stored in pFrame in first call
    548     global_video_pkt_pts = packet->pts;
    549     // Decode video frame
    550     len1 = avcodec_decode_video(is->video_st->codec, pFrame, &frameFinished,
    551                 packet->data, packet->size);
    552     if(packet->dts == AV_NOPTS_VALUE
    553        && pFrame->opaque && *(uint64_t*)pFrame->opaque != AV_NOPTS_VALUE) {
    554       pts = *(uint64_t *)pFrame->opaque;
    555     } else if(packet->dts != AV_NOPTS_VALUE) {
    556       pts = packet->dts;
    557     } else {
    558       pts = 0;
    559     }
    560     pts *= av_q2d(is->video_st->time_base);
    561 
    562     // Did we get a video frame?
    563     if(frameFinished) {
    564       pts = synchronize_video(is, pFrame, pts);
    565       if(queue_picture(is, pFrame, pts) < 0) {
    566     break;
    567       }
    568     }
    569     av_free_packet(packet);
    570   }
    571   av_free(pFrame);
    572   return 0;
    573 }
    574 
    575 int stream_component_open(VideoState *is, int stream_index) {
    576 
    577   AVFormatContext *pFormatCtx = is->pFormatCtx;
    578   AVCodecContext *codecCtx;
    579   AVCodec *codec;
    580   SDL_AudioSpec wanted_spec, spec;
    581 
    582   if(stream_index < 0 || stream_index >= pFormatCtx->nb_streams) {
    583     return -1;
    584   }
    585 
    586   // Get a pointer to the codec context for the video stream
    587   codecCtx = pFormatCtx->streams[stream_index]->codec;
    588 
    589   if(codecCtx->codec_type == CODEC_TYPE_AUDIO) {
    590     // Set audio settings from codec info
    591     wanted_spec.freq = codecCtx->sample_rate;
    592     wanted_spec.format = AUDIO_S16SYS;
    593     wanted_spec.channels = codecCtx->channels;
    594     wanted_spec.silence = 0;
    595     wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
    596     wanted_spec.callback = audio_callback;
    597     wanted_spec.userdata = is;
    598 
    599     if(SDL_OpenAudio(&wanted_spec, &spec) < 0) {
    600       fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
    601       return -1;
    602     }
    603     is->audio_hw_buf_size = spec.size;
    604   }
    605   codec = avcodec_find_decoder(codecCtx->codec_id);
    606 
    607   if(!codec || (avcodec_open(codecCtx, codec) < 0)) {
    608     fprintf(stderr, "Unsupported codec!\n");
    609     return -1;
    610   }
    611 
    612   switch(codecCtx->codec_type) {
    613   case CODEC_TYPE_AUDIO:
    614     is->audioStream = stream_index;
    615     is->audio_st = pFormatCtx->streams[stream_index];
    616     is->audio_buf_size = 0;
    617     is->audio_buf_index = 0;
    618     memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
    619     packet_queue_init(&is->audioq);
    620     SDL_PauseAudio(0);
    621     break;
    622   case CODEC_TYPE_VIDEO:
    623     is->videoStream = stream_index;
    624     is->video_st = pFormatCtx->streams[stream_index];
    625 
    626     is->frame_timer = (double)av_gettime() / 1000000.0;
    627     is->frame_last_delay = 40e-3;
    628 
    629     packet_queue_init(&is->videoq);
    630     is->video_tid = SDL_CreateThread(video_thread, is);
    631     codecCtx->get_buffer = our_get_buffer;
    632     codecCtx->release_buffer = our_release_buffer;
    633     break;
    634   default:
    635     break;
    636   }
    637 
    638 
    639 }
    640 
    641 int decode_interrupt_cb(void) {
    642   return (global_video_state && global_video_state->quit);
    643 }
    644 
    645 int decode_thread(void *arg) {
    646 
    647   VideoState *is = (VideoState *)arg;
    648   AVFormatContext *pFormatCtx;
    649   AVPacket pkt1, *packet = &pkt1;
    650 
    651   int video_index = -1;
    652   int audio_index = -1;
    653   int i;
    654 
    655   is->videoStream=-1;
    656   is->audioStream=-1;
    657 
    658   global_video_state = is;
    659   // will interrupt blocking functions if we quit!
    660   url_set_interrupt_cb(decode_interrupt_cb);
    661 
    662   // Open video file
    663   if(av_open_input_file(&pFormatCtx, is->filename, NULL, 0, NULL)!=0)
    664     return -1; // Couldn't open file
    665 
    666   is->pFormatCtx = pFormatCtx;
    667 
    668   // Retrieve stream information
    669   if(av_find_stream_info(pFormatCtx)<0)
    670     return -1; // Couldn't find stream information
    671 
    672   // Dump information about file onto standard error
    673   dump_format(pFormatCtx, 0, is->filename, 0);
    674 
    675   // Find the first video stream
    676 
    677   for(i=0; i<pFormatCtx->nb_streams; i++) {
    678     if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO &&
    679        video_index < 0) {
    680       video_index=i;
    681     }
    682     if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_AUDIO &&
    683        audio_index < 0) {
    684       audio_index=i;
    685     }
    686   }
    687   if(audio_index >= 0) {
    688     stream_component_open(is, audio_index);
    689   }
    690   if(video_index >= 0) {
    691     stream_component_open(is, video_index);
    692   }
    693 
    694   if(is->videoStream < 0 || is->audioStream < 0) {
    695     fprintf(stderr, "%s: could not open codecs\n", is->filename);
    696     goto fail;
    697   }
    698 
    699   // main decode loop
    700 
    701   for(;;) {
    702     if(is->quit) {
    703       break;
    704     }
    705     // seek stuff goes here
    706     if(is->audioq.size > MAX_AUDIOQ_SIZE ||
    707        is->videoq.size > MAX_VIDEOQ_SIZE) {
    708       SDL_Delay(10);
    709       continue;
    710     }
    711     if(av_read_frame(is->pFormatCtx, packet) < 0) {
    712       if(url_ferror(&pFormatCtx->pb) == 0) {
    713     SDL_Delay(100); /* no error; wait for user input */
    714     continue;
    715       } else {
    716     break;
    717       }
    718     }
    719     // Is this a packet from the video stream?
    720     if(packet->stream_index == is->videoStream) {
    721       packet_queue_put(&is->videoq, packet);
    722     } else if(packet->stream_index == is->audioStream) {
    723       packet_queue_put(&is->audioq, packet);
    724     } else {
    725       av_free_packet(packet);
    726     }
    727   }
    728   /* all done - wait for it */
    729   while(!is->quit) {
    730     SDL_Delay(100);
    731   }
    732 
    733  fail:
    734   {
    735     SDL_Event event;
    736     event.type = FF_QUIT_EVENT;
    737     event.user.data1 = is;
    738     SDL_PushEvent(&event);
    739   }
    740   return 0;
    741 }
    742 
    743 int main_sync(int argc, char *argv[]) {
    744 
    745   SDL_Event       event;
    746 
    747   VideoState      *is;
    748 
    749   is = av_mallocz(sizeof(VideoState));
    750 /*
    751   if(argc < 2) {
    752     fprintf(stderr, "Usage: test <file>\n");
    753     exit(1);
    754   }*/
    755   // Register all formats and codecs
    756   av_register_all();
    757 
    758   if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
    759     fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
    760     exit(1);
    761   }
    762 
    763   // Make a screen to put our video
    764 #ifndef __DARWIN__
    765         screen = SDL_SetVideoMode(W, H, 0, 0);
    766 #else
    767         screen = SDL_SetVideoMode(640, 480, 24, 0);
    768 #endif
    769   if(!screen) {
    770     fprintf(stderr, "SDL: could not set video mode - exiting\n");
    771     exit(1);
    772   }
    773 
    774   strncpy(is->filename, "test.mp4", sizeof(is->filename));
    775 
    776   is->pictq_mutex = SDL_CreateMutex();
    777   is->pictq_cond = SDL_CreateCond();
    778 
    779   schedule_refresh(is, 40);
    780 
    781   is->parse_tid = SDL_CreateThread(decode_thread, is);
    782   if(!is->parse_tid) {
    783     av_free(is);
    784     return -1;
    785   }
    786   for(;;) {
    787 
    788     SDL_WaitEvent(&event);
    789     switch(event.type) {
    790     case FF_QUIT_EVENT:
    791     case SDL_QUIT:
    792       is->quit = 1;
    793       SDL_Quit();
    794       exit(0);
    795       break;
    796     case FF_ALLOC_EVENT:
    797       alloc_picture(event.user.data1);
    798       break;
    799     case FF_REFRESH_EVENT:
    800       video_refresh_timer(event.user.data1);
    801       break;
    802     default:
    803       break;
    804     }
    805   }
    806   return 0;
    807 
    808 }
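
    Two notes on the synchronization fix, each with a small runnable example. First, get_audio_clock() in the listing above estimates where the audio really is: it takes is->audio_clock (the pts at the end of the data most recently decoded into audio_buf) and subtracts the playing time of the bytes that have been decoded but not yet handed to the sound card. A worked example with made-up numbers (stereo signed 16-bit samples at 44100 Hz, half of an 8192-byte buffer still pending):

/* Worked example of the get_audio_clock() formula; all numbers are hypothetical. */
#include <stdio.h>

int main(void) {
  double audio_clock     = 10.0;   /* pts at the end of the decoded buffer (s) */
  int    audio_buf_size  = 8192;   /* bytes decoded into audio_buf             */
  int    audio_buf_index = 4096;   /* bytes already copied out to SDL          */
  int    channels = 2, sample_rate = 44100;

  int bytes_per_sec = channels * 2 /* s16 */ * sample_rate;   /* 176400 B/s */
  int pending       = audio_buf_size - audio_buf_index;       /* 4096 bytes */

  double pts = audio_clock - (double)pending / bytes_per_sec;
  printf("audio clock = %.4f s\n", pts);   /* prints 9.9768 */
  return 0;
}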
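
    Second, video_refresh_timer() compares each picture's pts against that audio clock and adjusts the wait before the next refresh: if the video has fallen behind by more than the sync threshold the delay is dropped to zero, and if it is running ahead the delay is doubled. The sketch below isolates that rule so it can be run with hard-coded numbers; the variable names mirror the listing, but compute_frame_delay itself is only an illustration, not a function in the player.

/* A minimal, runnable sketch of the audio-clock sync rule in video_refresh_timer(). */
#include <stdio.h>
#include <math.h>

#define AV_SYNC_THRESHOLD   0.01
#define AV_NOSYNC_THRESHOLD 10.0

/* Given this frame's pts, the previous frame's pts and delay, and the audio
 * clock, return how long (in seconds) to wait before showing this frame. */
static double compute_frame_delay(double pts, double last_pts,
                                  double last_delay, double audio_clock) {
  double delay = pts - last_pts;
  if (delay <= 0 || delay >= 1.0)
    delay = last_delay;                    /* bogus pts gap: reuse the last delay */

  double diff = pts - audio_clock;         /* > 0: video ahead, < 0: video behind */
  double sync_threshold = (delay > AV_SYNC_THRESHOLD) ? delay : AV_SYNC_THRESHOLD;

  if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
    if (diff <= -sync_threshold)
      delay = 0;                           /* late: show the frame immediately    */
    else if (diff >= sync_threshold)
      delay = 2 * delay;                   /* early: hold it for one extra frame  */
  }
  return delay;
}

int main(void) {
  /* 25 fps video (40 ms frames) that has fallen 60 ms behind the audio clock. */
  double d = compute_frame_delay(10.00 /* pts */, 9.96 /* last_pts */,
                                 0.04 /* last_delay */, 10.06 /* audio_clock */);
  printf("delay = %.3f s\n", d);           /* prints 0.000 */
  return 0;
}
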
  • Original article: https://www.cnblogs.com/bigben0123/p/3296202.html