  • Android Local Video Player Development: Decoding Audio from a Video File with FFmpeg (2)

    In Android Local Video Player Development: Decoding Audio from a Video File with FFmpeg (1) we decoded the audio out of a video file; in this part we play that decoded audio with OpenSL ES. OpenSL ES itself is not introduced here; see the official documentation and the native-audio sample under the NDK's samples directory, from which most of the playback code below is taken. The player builds on the previous part's code, as follows:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    
    #include <assert.h>
    #include <unistd.h> // for usleep()/sleep() in the decode loop
    #include <android/log.h>
    
    // for native audio
    #include <SLES/OpenSLES.h>
    #include <SLES/OpenSLES_Android.h>
    
    #include "VideoPlayerDecode.h"
    #include "../ffmpeg/libavutil/avutil.h"
    #include "../ffmpeg/libavcodec/avcodec.h"
    #include "../ffmpeg/libavformat/avformat.h"
    
    #define LOGI(...) ((void)__android_log_print(ANDROID_LOG_INFO, "graduation", __VA_ARGS__))
    
    AVFormatContext *pFormatCtx = NULL;
    int             audioStream = -1, delay_time, videoFlag = 0; // -1 = no audio stream found yet
    AVCodecContext  *aCodecCtx;
    AVCodec         *aCodec;
    AVFrame         *aFrame;
    AVPacket        packet;
    int  frameFinished = 0;
    
    // engine interfaces
    static SLObjectItf engineObject = NULL;
    static SLEngineItf engineEngine;
    
    // output mix interfaces
    static SLObjectItf outputMixObject = NULL;
    static SLEnvironmentalReverbItf outputMixEnvironmentalReverb = NULL;
    
    // buffer queue player interfaces
    static SLObjectItf bqPlayerObject = NULL;
    static SLPlayItf bqPlayerPlay;
    static SLAndroidSimpleBufferQueueItf bqPlayerBufferQueue;
    static SLEffectSendItf bqPlayerEffectSend;
    static SLMuteSoloItf bqPlayerMuteSolo;
    static SLVolumeItf bqPlayerVolume;
    
    // aux effect on the output mix, used by the buffer queue player
    static const SLEnvironmentalReverbSettings reverbSettings =
        SL_I3DL2_ENVIRONMENT_PRESET_STONECORRIDOR;
    
    // file descriptor player interfaces
    static SLObjectItf fdPlayerObject = NULL;
    static SLPlayItf fdPlayerPlay;
    static SLSeekItf fdPlayerSeek;
    static SLMuteSoloItf fdPlayerMuteSolo;
    static SLVolumeItf fdPlayerVolume;
    
    // pointer and size of the next player buffer to enqueue, and number of remaining buffers
    static short *nextBuffer;
    static unsigned nextSize;
    static int nextCount;
    
    // this callback handler is called every time a buffer finishes playing
    void bqPlayerCallback(SLAndroidSimpleBufferQueueItf bq, void *context)
    {
        assert(bq == bqPlayerBufferQueue);
        assert(NULL == context);
        // for streaming playback, replace this test by logic to find and fill the next buffer
        if (--nextCount > 0 && NULL != nextBuffer && 0 != nextSize) {
            SLresult result;
            // enqueue another buffer
            result = (*bqPlayerBufferQueue)->Enqueue(bqPlayerBufferQueue, nextBuffer, nextSize);
            // the most likely other result is SL_RESULT_BUFFER_INSUFFICIENT,
            // which for this code example would indicate a programming error
            assert(SL_RESULT_SUCCESS == result);
        }
    }
    
    
    void createEngine(JNIEnv* env, jclass clazz)
    {
        SLresult result;
    
        // create engine
        result = slCreateEngine(&engineObject, 0, NULL, 0, NULL, NULL);
        assert(SL_RESULT_SUCCESS == result);
    
        // realize the engine
        result = (*engineObject)->Realize(engineObject, SL_BOOLEAN_FALSE);
        assert(SL_RESULT_SUCCESS == result);
    
        // get the engine interface, which is needed in order to create other objects
        result = (*engineObject)->GetInterface(engineObject, SL_IID_ENGINE, &engineEngine);
        assert(SL_RESULT_SUCCESS == result);
    
        // create output mix, with environmental reverb specified as a non-required interface
        const SLInterfaceID ids[1] = {SL_IID_ENVIRONMENTALREVERB};
        const SLboolean req[1] = {SL_BOOLEAN_FALSE};
        result = (*engineEngine)->CreateOutputMix(engineEngine, &outputMixObject, 1, ids, req);
        assert(SL_RESULT_SUCCESS == result);
    
        // realize the output mix
        result = (*outputMixObject)->Realize(outputMixObject, SL_BOOLEAN_FALSE);
        assert(SL_RESULT_SUCCESS == result);
    
        // get the environmental reverb interface
        // this could fail if the environmental reverb effect is not available,
        // either because the feature is not present, excessive CPU load, or
        // the required MODIFY_AUDIO_SETTINGS permission was not requested and granted
        result = (*outputMixObject)->GetInterface(outputMixObject, SL_IID_ENVIRONMENTALREVERB,
                &outputMixEnvironmentalReverb);
        if (SL_RESULT_SUCCESS == result) {
            result = (*outputMixEnvironmentalReverb)->SetEnvironmentalReverbProperties(
                    outputMixEnvironmentalReverb, &reverbSettings);
        }
        // ignore unsuccessful result codes for environmental reverb, as it is optional for this example
    }
    
    void createBufferQueueAudioPlayer(JNIEnv* env, jclass clazz, int rate, int channel,int bitsPerSample)
    {
        SLresult result;
    
        // configure audio source
        SLDataLocator_AndroidSimpleBufferQueue loc_bufq = {SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, 2};
    //    SLDataFormat_PCM format_pcm = {SL_DATAFORMAT_PCM, 2, SL_SAMPLINGRATE_16,
    //        SL_PCMSAMPLEFORMAT_FIXED_16, SL_PCMSAMPLEFORMAT_FIXED_16,
    //        SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT, SL_BYTEORDER_LITTLEENDIAN};
        SLDataFormat_PCM format_pcm;
        format_pcm.formatType    = SL_DATAFORMAT_PCM;
        format_pcm.numChannels   = channel;
        format_pcm.samplesPerSec = rate * 1000;   // SLmilliHertz is Hz * 1000
        format_pcm.bitsPerSample = bitsPerSample;
        format_pcm.containerSize = 16;
        if (channel == 2)
            format_pcm.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT;
        else
            format_pcm.channelMask = SL_SPEAKER_FRONT_CENTER;
        format_pcm.endianness = SL_BYTEORDER_LITTLEENDIAN;
        SLDataSource audioSrc = {&loc_bufq, &format_pcm};
    
        // configure audio sink
        SLDataLocator_OutputMix loc_outmix = {SL_DATALOCATOR_OUTPUTMIX, outputMixObject};
        SLDataSink audioSnk = {&loc_outmix, NULL};
    
        // create audio player
        const SLInterfaceID ids[3] = {SL_IID_BUFFERQUEUE, SL_IID_EFFECTSEND,
                /*SL_IID_MUTESOLO,*/ SL_IID_VOLUME};
        const SLboolean req[3] = {SL_BOOLEAN_TRUE, SL_BOOLEAN_TRUE,
                /*SL_BOOLEAN_TRUE,*/ SL_BOOLEAN_TRUE};
        result = (*engineEngine)->CreateAudioPlayer(engineEngine, &bqPlayerObject, &audioSrc, &audioSnk,
                3, ids, req);
        assert(SL_RESULT_SUCCESS == result);

        // realize the player
        result = (*bqPlayerObject)->Realize(bqPlayerObject, SL_BOOLEAN_FALSE);
        assert(SL_RESULT_SUCCESS == result);
    
        // get the play interface
        result = (*bqPlayerObject)->GetInterface(bqPlayerObject, SL_IID_PLAY, &bqPlayerPlay);
        assert(SL_RESULT_SUCCESS == result);
    
        // get the buffer queue interface
        result = (*bqPlayerObject)->GetInterface(bqPlayerObject, SL_IID_BUFFERQUEUE,
                &bqPlayerBufferQueue);
        assert(SL_RESULT_SUCCESS == result);
    
        // register callback on the buffer queue
        result = (*bqPlayerBufferQueue)->RegisterCallback(bqPlayerBufferQueue, bqPlayerCallback, NULL);
        assert(SL_RESULT_SUCCESS == result);
    
        // get the effect send interface
        result = (*bqPlayerObject)->GetInterface(bqPlayerObject, SL_IID_EFFECTSEND,
                &bqPlayerEffectSend);
        assert(SL_RESULT_SUCCESS == result);
    
    #if 0   // mute/solo is not supported for sources that are known to be mono, as this is
        // get the mute/solo interface
        result = (*bqPlayerObject)->GetInterface(bqPlayerObject, SL_IID_MUTESOLO, &bqPlayerMuteSolo);
        assert(SL_RESULT_SUCCESS == result);
    #endif
    
        // get the volume interface
        result = (*bqPlayerObject)->GetInterface(bqPlayerObject, SL_IID_VOLUME, &bqPlayerVolume);
        assert(SL_RESULT_SUCCESS == result);
    
        // set the player's state to playing
        result = (*bqPlayerPlay)->SetPlayState(bqPlayerPlay, SL_PLAYSTATE_PLAYING);
        assert(SL_RESULT_SUCCESS == result);
    
    }
    
    void AudioWrite(const void *buffer, int size)
    {
        (*bqPlayerBufferQueue)->Enqueue(bqPlayerBufferQueue, buffer, size);
    }
    
    JNIEXPORT jint JNICALL Java_com_zhangjie_graduation_videopalyer_jni_VideoPlayerDecode_VideoPlayer
      (JNIEnv *env, jclass clz, jstring fileName)
    {
        const char* local_title = (*env)->GetStringUTFChars(env, fileName, NULL);
        av_register_all(); // register all supported container formats and codecs
        /*
         * Open the file; this reads only the header and does not fill in
         * stream information yet.
         */
        if(avformat_open_input(&pFormatCtx, local_title, NULL, NULL) != 0)
            return -1;
        /*
         * Retrieve the stream information. This function reads packets to
         * determine all of the streams and sets pFormatCtx->streams to point
         * at them; it does not move the file position, and the packets it
         * reads are kept for the decoding that follows.
         */
        if(avformat_find_stream_info(pFormatCtx, NULL) < 0)
            return -1;
        /*
         * Dump the file information, i.e. the details the ffmpeg command-line
         * tool prints. The second argument selects which stream to report
         * (-1 lets ffmpeg choose). The last argument indicates whether the
         * dump is for an output file; ours is an input file, so it must be 0.
         */
        av_dump_format(pFormatCtx, -1, local_title, 0);
    	int i = 0;
    	for(i=0; i< pFormatCtx->nb_streams; i++)
    	{
    		if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO){
    			audioStream = i;
    			break;
    		}
    	}
    
    	if(audioStream < 0)return -1;
    	aCodecCtx = pFormatCtx->streams[audioStream]->codec;
    	aCodec = avcodec_find_decoder(aCodecCtx->codec_id);
    	if(avcodec_open2(aCodecCtx, aCodec, NULL) < 0)return -1;
    	aFrame = avcodec_alloc_frame();
    	if(aFrame == NULL)return -1;
    	int ret;
    	createEngine(env, clz);
    	int flag_start = 0;
    	while(videoFlag != -1)
    	{
    		if(av_read_frame(pFormatCtx, &packet) < 0)break;
    		if(packet.stream_index == audioStream)
    		{
    			ret = avcodec_decode_audio4(aCodecCtx, aFrame, &frameFinished, &packet);
    			if(ret > 0 && frameFinished)
    			{
    				if(flag_start == 0)
    				{
    					flag_start = 1;
    					createBufferQueueAudioPlayer(env, clz, aCodecCtx->sample_rate, aCodecCtx->channels, SL_PCMSAMPLEFORMAT_FIXED_16);
    				}
    				int data_size = av_samples_get_buffer_size(
    						aFrame->linesize,aCodecCtx->channels,
    						aFrame->nb_samples,aCodecCtx->sample_fmt, 1);
    				LOGI("audioDecodec  :%d : %d, :%d    :%d",data_size,aCodecCtx->channels,aFrame->nb_samples,aCodecCtx->sample_rate);
    				(*bqPlayerBufferQueue)->Enqueue(bqPlayerBufferQueue, aFrame->data[0], data_size);
    			}
    
    		}
    		usleep(5000);
            while(videoFlag != 0)
            {
                if(videoFlag == 1) // paused
                {
                    sleep(1);
                }else if(videoFlag == -1) // stopped
                {
                    break;
                }
            }
    		av_free_packet(&packet);
    	}
        av_free(aFrame);
        avcodec_close(aCodecCtx);
        avformat_close_input(&pFormatCtx);
        (*env)->ReleaseStringUTFChars(env, fileName, local_title);
        return 0;
    }
    
    JNIEXPORT jint JNICALL Java_com_zhangjie_graduation_videopalyer_jni_VideoPlayerDecode_VideoPlayerPauseOrPlay
      (JNIEnv *env, jclass clz)
    {
            if(videoFlag == 1)
            {
                    videoFlag = 0;
            }else if(videoFlag == 0){
                    videoFlag = 1;
            }
            return videoFlag;
    }
    
    JNIEXPORT jint JNICALL Java_com_zhangjie_graduation_videopalyer_jni_VideoPlayerDecode_VideoPlayerStop
      (JNIEnv *env, jclass clz)
    {
        videoFlag = -1;
        return videoFlag;
    }
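
    One caveat in the decode loop above: it enqueues aFrame->data[0] directly, but FFmpeg reuses that frame's buffer on the next avcodec_decode_audio4() call, so the usleep(5000) only papers over a race between the decoder and the audio device. Below is a minimal sketch of the safer pattern: copy each decoded frame into buffers owned by the player, two of them to match the queue depth declared in loc_bufq. The names out_buf, buf_index, and enqueue_pcm are hypothetical, not from this post.

    #include <stdint.h>
    #include <string.h>
    #include <SLES/OpenSLES.h>
    #include <SLES/OpenSLES_Android.h>

    #define OUT_BUF_SIZE 192000               /* large enough for one decoded frame */
    static uint8_t out_buf[2][OUT_BUF_SIZE];  /* two buffers, matching the queue depth of 2 */
    static int buf_index = 0;

    extern SLAndroidSimpleBufferQueueItf bqPlayerBufferQueue;

    /* Copy one decoded frame into the next free buffer, then enqueue it. */
    void enqueue_pcm(const uint8_t *pcm, int size)
    {
        if (size > OUT_BUF_SIZE)
            size = OUT_BUF_SIZE;
        memcpy(out_buf[buf_index], pcm, size);
        (*bqPlayerBufferQueue)->Enqueue(bqPlayerBufferQueue, out_buf[buf_index], size);
        buf_index ^= 1;  /* alternate between the two buffers */
    }

    With this helper in place, the Enqueue call in the loop would become enqueue_pcm(aFrame->data[0], data_size).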


    Next, OpenSL ES library support needs to be added in Android.mk, as follows:

    LOCAL_PATH := $(call my-dir)
    #######################################################
    ##########		ffmpeg-prebuilt		#######
    #######################################################
    #declare the prebuilt library
    include $(CLEAR_VARS)
    LOCAL_MODULE := ffmpeg-prebuilt
    LOCAL_SRC_FILES := ffmpeg/android/armv7-a/libffmpeg-neon.so
    LOCAL_EXPORT_C_INCLUDES := ffmpeg/android/armv7-a/include
    LOCAL_EXPORT_LDLIBS := ffmpeg/android/armv7-a/libffmpeg-neon.so
    LOCAL_PRELINK_MODULE := true
    include $(PREBUILT_SHARED_LIBRARY)
    
    ########################################################
    ##		ffmpeg-test-neon.so		########
    ########################################################
    include $(CLEAR_VARS)
    TARGET_ARCH_ABI=armeabi-v7a
    LOCAL_ARM_MODE=arm
    LOCAL_ARM_NEON=true
    LOCAL_ALLOW_UNDEFINED_SYMBOLS=false
    LOCAL_MODULE := ffmpeg-test-neon
    #LOCAL_SRC_FILES := jniffmpeg/VideoPlayerDecode.c
    LOCAL_SRC_FILES := jniffmpeg/Decodec_Audio.c	
    
    LOCAL_C_INCLUDES := $(LOCAL_PATH)/ffmpeg/android/armv7-a/include \
    		    $(LOCAL_PATH)/ffmpeg \
    		    $(LOCAL_PATH)/ffmpeg/libavutil \
    		    $(LOCAL_PATH)/ffmpeg/libavcodec \
    		    $(LOCAL_PATH)/ffmpeg/libavformat \
    		    $(LOCAL_PATH)/ffmpeg/libavcodec \
    		    $(LOCAL_PATH)/ffmpeg/libswscale \
    		    $(LOCAL_PATH)/jniffmpeg \
    		    $(LOCAL_PATH)
    LOCAL_SHARED_LIBRARIES := ffmpeg-prebuilt
    LOCAL_LDLIBS    := -llog -lGLESv2 -ljnigraphics -lz -lm $(LOCAL_PATH)/ffmpeg/android/armv7-a/libffmpeg-neon.so
    LOCAL_LDLIBS    += -lOpenSLES 
    include $(BUILD_SHARED_LIBRARY)


    Because OpenSL ES requires at least Android API level 9, the target platform must be set in Application.mk:

    # The ARMv7 is significantly faster due to the use of the hardware FPU
    APP_ABI := armeabi 
    APP_PLATFORM := android-9
    APP_STL := stlport_static
    APP_CPPFLAGS += -fno-rtti
    #APP_ABI := armeabi


    Finally, run ndk-build in a terminal; the code is built into the ffmpeg-test-neon.so library. Calling the VideoPlayer function from the Android side then plays the video's audio automatically. Testing shows the sound plays but with some noise, perhaps because the sample rate is set incorrectly or some other configuration is wrong. The next chapter focuses on fixing this, and also moves to a queue-based design: audio packets read from the video file go into a queue, and a consumer takes them out, decodes them, and plays them.
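
    As a preview of that queue-based design, here is a minimal sketch of a thread-safe AVPacket queue using a pthread mutex and condition variable. This is an illustrative assumption about the next part, not its actual code; PacketQueue, packet_queue_put, and packet_queue_get are hypothetical names.

    #include <string.h>
    #include <pthread.h>
    #include "../ffmpeg/libavformat/avformat.h"

    typedef struct PacketList {
        AVPacket pkt;
        struct PacketList *next;
    } PacketList;

    typedef struct PacketQueue {
        PacketList *first, *last;
        int nb_packets;
        pthread_mutex_t mutex;
        pthread_cond_t cond;
    } PacketQueue;

    void packet_queue_init(PacketQueue *q)
    {
        memset(q, 0, sizeof(*q));
        pthread_mutex_init(&q->mutex, NULL);
        pthread_cond_init(&q->cond, NULL);
    }

    /* Producer: the demux loop calls this for every audio packet. */
    int packet_queue_put(PacketQueue *q, AVPacket *pkt)
    {
        PacketList *node = av_malloc(sizeof(PacketList));
        if (!node)
            return -1;
        node->pkt = *pkt;   /* the queue takes ownership of the packet's data */
        node->next = NULL;
        pthread_mutex_lock(&q->mutex);
        if (q->last)
            q->last->next = node;
        else
            q->first = node;
        q->last = node;
        q->nb_packets++;
        pthread_cond_signal(&q->cond);
        pthread_mutex_unlock(&q->mutex);
        return 0;
    }

    /* Consumer: the decode thread calls this; it blocks while the queue is empty. */
    int packet_queue_get(PacketQueue *q, AVPacket *pkt)
    {
        pthread_mutex_lock(&q->mutex);
        while (q->first == NULL)
            pthread_cond_wait(&q->cond, &q->mutex);
        PacketList *node = q->first;
        q->first = node->next;
        if (q->first == NULL)
            q->last = NULL;
        q->nb_packets--;
        pthread_mutex_unlock(&q->mutex);
        *pkt = node->pkt;
        av_free(node);
        return 0;
    }

    The demux loop would then call packet_queue_put for each audio packet instead of decoding inline, while a separate audio thread loops on packet_queue_get, decodes, and enqueues PCM to OpenSL ES.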

